common/mlx5: add per-lcore sharing flag in object list
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24 #include <rte_mtr.h>
25 #include <rte_mtr_driver.h>
26 #include <rte_tailq.h>
27
28 #include <mlx5_glue.h>
29 #include <mlx5_devx_cmds.h>
30 #include <mlx5_prm.h>
31 #include <mlx5_malloc.h>
32
33 #include "mlx5_defs.h"
34 #include "mlx5.h"
35 #include "mlx5_common_os.h"
36 #include "mlx5_flow.h"
37 #include "mlx5_flow_os.h"
38 #include "mlx5_rx.h"
39 #include "mlx5_tx.h"
40 #include "rte_pmd_mlx5.h"
41
42 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
43
44 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
45 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
46 #endif
47
48 #ifndef HAVE_MLX5DV_DR_ESWITCH
49 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
50 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
51 #endif
52 #endif
53
54 #ifndef HAVE_MLX5DV_DR
55 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
56 #endif
57
58 /* VLAN header definitions */
59 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
60 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
61 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
62 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
63 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
64
/*
 * Summary of L3/L4 protocol layers deduced from the flow items.
 * The anonymous bit-field view and 'attr' alias the same 32 bits,
 * so the whole set can be reset at once with "attr->attr = 0".
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1;    /* Set once the flags below are computed. */
		uint32_t ipv4:1;     /* Outermost L3 header is IPv4. */
		uint32_t ipv6:1;     /* Outermost L3 header is IPv6. */
		uint32_t tcp:1;      /* Outermost L4 header is TCP. */
		uint32_t udp:1;      /* Outermost L4 header is UDP. */
		uint32_t reserved:27;
	};
	uint32_t attr;               /* All flags as one word for bulk reset. */
};
76
77 static int
78 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
79                              struct mlx5_flow_tbl_resource *tbl);
80
81 static int
82 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
83                                      uint32_t encap_decap_idx);
84
85 static int
86 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
87                                         uint32_t port_id);
88 static void
89 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
90
91 static int
92 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
93                                   uint32_t rix_jump);
94
95 /**
96  * Initialize flow attributes structure according to flow items' types.
97  *
98  * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
99  * mode. For tunnel mode, the items to be modified are the outermost ones.
100  *
101  * @param[in] item
102  *   Pointer to item specification.
103  * @param[out] attr
104  *   Pointer to flow attributes structure.
105  * @param[in] dev_flow
106  *   Pointer to the sub flow.
107  * @param[in] tunnel_decap
108  *   Whether action is after tunnel decapsulation.
109  */
110 static void
111 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
112                   struct mlx5_flow *dev_flow, bool tunnel_decap)
113 {
114         uint64_t layers = dev_flow->handle->layers;
115
116         /*
117          * If layers is already initialized, it means this dev_flow is the
118          * suffix flow, the layers flags is set by the prefix flow. Need to
119          * use the layer flags from prefix flow as the suffix flow may not
120          * have the user defined items as the flow is split.
121          */
122         if (layers) {
123                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
124                         attr->ipv4 = 1;
125                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
126                         attr->ipv6 = 1;
127                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
128                         attr->tcp = 1;
129                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
130                         attr->udp = 1;
131                 attr->valid = 1;
132                 return;
133         }
134         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
135                 uint8_t next_protocol = 0xff;
136                 switch (item->type) {
137                 case RTE_FLOW_ITEM_TYPE_GRE:
138                 case RTE_FLOW_ITEM_TYPE_NVGRE:
139                 case RTE_FLOW_ITEM_TYPE_VXLAN:
140                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
141                 case RTE_FLOW_ITEM_TYPE_GENEVE:
142                 case RTE_FLOW_ITEM_TYPE_MPLS:
143                         if (tunnel_decap)
144                                 attr->attr = 0;
145                         break;
146                 case RTE_FLOW_ITEM_TYPE_IPV4:
147                         if (!attr->ipv6)
148                                 attr->ipv4 = 1;
149                         if (item->mask != NULL &&
150                             ((const struct rte_flow_item_ipv4 *)
151                             item->mask)->hdr.next_proto_id)
152                                 next_protocol =
153                                     ((const struct rte_flow_item_ipv4 *)
154                                       (item->spec))->hdr.next_proto_id &
155                                     ((const struct rte_flow_item_ipv4 *)
156                                       (item->mask))->hdr.next_proto_id;
157                         if ((next_protocol == IPPROTO_IPIP ||
158                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
159                                 attr->attr = 0;
160                         break;
161                 case RTE_FLOW_ITEM_TYPE_IPV6:
162                         if (!attr->ipv4)
163                                 attr->ipv6 = 1;
164                         if (item->mask != NULL &&
165                             ((const struct rte_flow_item_ipv6 *)
166                             item->mask)->hdr.proto)
167                                 next_protocol =
168                                     ((const struct rte_flow_item_ipv6 *)
169                                       (item->spec))->hdr.proto &
170                                     ((const struct rte_flow_item_ipv6 *)
171                                       (item->mask))->hdr.proto;
172                         if ((next_protocol == IPPROTO_IPIP ||
173                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
174                                 attr->attr = 0;
175                         break;
176                 case RTE_FLOW_ITEM_TYPE_UDP:
177                         if (!attr->tcp)
178                                 attr->udp = 1;
179                         break;
180                 case RTE_FLOW_ITEM_TYPE_TCP:
181                         if (!attr->udp)
182                                 attr->tcp = 1;
183                         break;
184                 default:
185                         break;
186                 }
187         }
188         attr->valid = 1;
189 }
190
191 /**
192  * Convert rte_mtr_color to mlx5 color.
193  *
194  * @param[in] rcol
195  *   rte_mtr_color.
196  *
197  * @return
198  *   mlx5 color.
199  */
200 static int
201 rte_col_2_mlx5_col(enum rte_color rcol)
202 {
203         switch (rcol) {
204         case RTE_COLOR_GREEN:
205                 return MLX5_FLOW_COLOR_GREEN;
206         case RTE_COLOR_YELLOW:
207                 return MLX5_FLOW_COLOR_YELLOW;
208         case RTE_COLOR_RED:
209                 return MLX5_FLOW_COLOR_RED;
210         default:
211                 break;
212         }
213         return MLX5_FLOW_COLOR_UNDEFINED;
214 }
215
/*
 * Descriptor of one header field addressable by a modify-header command:
 * where the field sits in the protocol header and which hardware field
 * identifier it maps to.
 */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* Hardware field identifier. */
};
221
/* Modifiable Ethernet header fields; terminated by a zero-size entry. */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16}, /* Destination MAC, upper 32 bits. */
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},  /* Destination MAC, lower 16 bits. */
	{4,  6, MLX5_MODI_OUT_SMAC_47_16}, /* Source MAC, upper 32 bits. */
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},  /* Source MAC, lower 16 bits. */
	{0, 0, 0},                         /* End marker. */
};
229
/* Outermost VLAN VID field; unlike the other tables, size is in BITS. */
struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID}, /* 12-bit VLAN identifier. */
	{0, 0, 0},                        /* End marker. */
};
235
/* Modifiable IPv4 header fields; terminated by a zero-size entry. */
struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},   /* DSCP (in the ToS byte). */
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},  /* Time to live. */
	{4, 12, MLX5_MODI_OUT_SIPV4},     /* Source address. */
	{4, 16, MLX5_MODI_OUT_DIPV4},     /* Destination address. */
	{0, 0, 0},                        /* End marker. */
};
243
/* Modifiable IPv6 header fields; 128-bit addresses split in 32-bit parts. */
struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},        /* DSCP (traffic class). */
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},  /* Hop limit. */
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},   /* Source address bits 127-96. */
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},    /* Source address bits 95-64. */
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},    /* Source address bits 63-32. */
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},     /* Source address bits 31-0. */
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},   /* Dest. address bits 127-96. */
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},    /* Dest. address bits 95-64. */
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},    /* Dest. address bits 63-32. */
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},     /* Dest. address bits 31-0. */
	{0, 0, 0},                             /* End marker. */
};
257
/* Modifiable UDP header fields; terminated by a zero-size entry. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT}, /* Source port. */
	{2, 2, MLX5_MODI_OUT_UDP_DPORT}, /* Destination port. */
	{0, 0, 0},                       /* End marker. */
};
263
/* Modifiable TCP header fields; terminated by a zero-size entry. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},   /* Source port. */
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},   /* Destination port. */
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM}, /* Sequence number. */
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM}, /* Acknowledgment number. */
	{0, 0, 0},                         /* End marker. */
};
271
272 static const struct rte_flow_item *
273 mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
274 {
275         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
276                 switch (item->type) {
277                 default:
278                         break;
279                 case RTE_FLOW_ITEM_TYPE_VXLAN:
280                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
281                 case RTE_FLOW_ITEM_TYPE_GRE:
282                 case RTE_FLOW_ITEM_TYPE_MPLS:
283                 case RTE_FLOW_ITEM_TYPE_NVGRE:
284                 case RTE_FLOW_ITEM_TYPE_GENEVE:
285                         return item;
286                 case RTE_FLOW_ITEM_TYPE_IPV4:
287                 case RTE_FLOW_ITEM_TYPE_IPV6:
288                         if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
289                             item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
290                                 return item;
291                         break;
292                 }
293         }
294         return NULL;
295 }
296
297 static void
298 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
299                           uint8_t next_protocol, uint64_t *item_flags,
300                           int *tunnel)
301 {
302         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
303                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
304         if (next_protocol == IPPROTO_IPIP) {
305                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
306                 *tunnel = 1;
307         }
308         if (next_protocol == IPPROTO_IPV6) {
309                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
310                 *tunnel = 1;
311         }
312 }
313
314 /* Update VLAN's VID/PCP based on input rte_flow_action.
315  *
316  * @param[in] action
317  *   Pointer to struct rte_flow_action.
318  * @param[out] vlan
319  *   Pointer to struct rte_vlan_hdr.
320  */
321 static void
322 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
323                          struct rte_vlan_hdr *vlan)
324 {
325         uint16_t vlan_tci;
326         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
327                 vlan_tci =
328                     ((const struct rte_flow_action_of_set_vlan_pcp *)
329                                                action->conf)->vlan_pcp;
330                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
331                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
332                 vlan->vlan_tci |= vlan_tci;
333         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
334                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
335                 vlan->vlan_tci |= rte_be_to_cpu_16
336                     (((const struct rte_flow_action_of_set_vlan_vid *)
337                                              action->conf)->vlan_vid);
338         }
339 }
340
341 /**
342  * Fetch 1, 2, 3 or 4 byte field from the byte array
343  * and return as unsigned integer in host-endian format.
344  *
345  * @param[in] data
346  *   Pointer to data array.
347  * @param[in] size
348  *   Size of field to extract.
349  *
350  * @return
351  *   converted field in host endian format.
352  */
353 static inline uint32_t
354 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
355 {
356         uint32_t ret;
357
358         switch (size) {
359         case 1:
360                 ret = *data;
361                 break;
362         case 2:
363                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
364                 break;
365         case 3:
366                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
367                 ret = (ret << 8) | *(data + sizeof(uint16_t));
368                 break;
369         case 4:
370                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
371                 break;
372         default:
373                 MLX5_ASSERT(false);
374                 ret = 0;
375                 break;
376         }
377         return ret;
378 }
379
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	/*
	 * For COPY only: number of source bits already consumed by the
	 * previous destination field when the source spilled over, used
	 * as the extra bit offset for the follow-up command.
	 */
	uint32_t carry_b = 0;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented as in big-endian format either.
	 * Mask must be always present, it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		uint32_t size_b;
		uint32_t off_b;
		uint32_t mask;
		uint32_t data;
		/* Whether to advance to the next source/destination field. */
		bool next_field = true;
		bool next_dcopy = true;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		/* Zero mask: field not requested, skip it entirely. */
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask) + carry_b;
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		/* Length 0 is the hardware encoding for a full 32 bits. */
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
				0 : size_b,
		};
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
			/*
			 * Destination field overflow. Copy leftovers of
			 * a source field to the next destination field.
			 */
			carry_b = 0;
			if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
			    dcopy->size != 0) {
				actions[i].length =
					dcopy->size * CHAR_BIT - dcopy->offset;
				carry_b = actions[i].length;
				/* Stay on this source field for the rest. */
				next_field = false;
			}
			/*
			 * Not enough bits in a source field to fill a
			 * destination field. Switch to the next source.
			 */
			if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
			    (size_b == field->size * CHAR_BIT - off_b)) {
				actions[i].length =
					field->size * CHAR_BIT - off_b;
				dcopy->offset += actions[i].length;
				/* Stay on this destination for the rest. */
				next_dcopy = false;
			}
			if (next_dcopy)
				++dcopy;
		} else {
			/* SET/ADD: take the immediate value from the spec. */
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (next_field)
			++field;
		++i;
	} while (field->size);
	/* No command emitted means every field mask was zero. */
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
510
511 /**
512  * Convert modify-header set IPv4 address action to DV specification.
513  *
514  * @param[in,out] resource
515  *   Pointer to the modify-header resource.
516  * @param[in] action
517  *   Pointer to action specification.
518  * @param[out] error
519  *   Pointer to the error structure.
520  *
521  * @return
522  *   0 on success, a negative errno value otherwise and rte_errno is set.
523  */
524 static int
525 flow_dv_convert_action_modify_ipv4
526                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
527                          const struct rte_flow_action *action,
528                          struct rte_flow_error *error)
529 {
530         const struct rte_flow_action_set_ipv4 *conf =
531                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
532         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
533         struct rte_flow_item_ipv4 ipv4;
534         struct rte_flow_item_ipv4 ipv4_mask;
535
536         memset(&ipv4, 0, sizeof(ipv4));
537         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
538         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
539                 ipv4.hdr.src_addr = conf->ipv4_addr;
540                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
541         } else {
542                 ipv4.hdr.dst_addr = conf->ipv4_addr;
543                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
544         }
545         item.spec = &ipv4;
546         item.mask = &ipv4_mask;
547         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
548                                              MLX5_MODIFICATION_TYPE_SET, error);
549 }
550
551 /**
552  * Convert modify-header set IPv6 address action to DV specification.
553  *
554  * @param[in,out] resource
555  *   Pointer to the modify-header resource.
556  * @param[in] action
557  *   Pointer to action specification.
558  * @param[out] error
559  *   Pointer to the error structure.
560  *
561  * @return
562  *   0 on success, a negative errno value otherwise and rte_errno is set.
563  */
564 static int
565 flow_dv_convert_action_modify_ipv6
566                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
567                          const struct rte_flow_action *action,
568                          struct rte_flow_error *error)
569 {
570         const struct rte_flow_action_set_ipv6 *conf =
571                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
572         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
573         struct rte_flow_item_ipv6 ipv6;
574         struct rte_flow_item_ipv6 ipv6_mask;
575
576         memset(&ipv6, 0, sizeof(ipv6));
577         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
578         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
579                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
580                        sizeof(ipv6.hdr.src_addr));
581                 memcpy(&ipv6_mask.hdr.src_addr,
582                        &rte_flow_item_ipv6_mask.hdr.src_addr,
583                        sizeof(ipv6.hdr.src_addr));
584         } else {
585                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
586                        sizeof(ipv6.hdr.dst_addr));
587                 memcpy(&ipv6_mask.hdr.dst_addr,
588                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
589                        sizeof(ipv6.hdr.dst_addr));
590         }
591         item.spec = &ipv6;
592         item.mask = &ipv6_mask;
593         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
594                                              MLX5_MODIFICATION_TYPE_SET, error);
595 }
596
597 /**
598  * Convert modify-header set MAC address action to DV specification.
599  *
600  * @param[in,out] resource
601  *   Pointer to the modify-header resource.
602  * @param[in] action
603  *   Pointer to action specification.
604  * @param[out] error
605  *   Pointer to the error structure.
606  *
607  * @return
608  *   0 on success, a negative errno value otherwise and rte_errno is set.
609  */
610 static int
611 flow_dv_convert_action_modify_mac
612                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
613                          const struct rte_flow_action *action,
614                          struct rte_flow_error *error)
615 {
616         const struct rte_flow_action_set_mac *conf =
617                 (const struct rte_flow_action_set_mac *)(action->conf);
618         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
619         struct rte_flow_item_eth eth;
620         struct rte_flow_item_eth eth_mask;
621
622         memset(&eth, 0, sizeof(eth));
623         memset(&eth_mask, 0, sizeof(eth_mask));
624         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
625                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
626                        sizeof(eth.src.addr_bytes));
627                 memcpy(&eth_mask.src.addr_bytes,
628                        &rte_flow_item_eth_mask.src.addr_bytes,
629                        sizeof(eth_mask.src.addr_bytes));
630         } else {
631                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
632                        sizeof(eth.dst.addr_bytes));
633                 memcpy(&eth_mask.dst.addr_bytes,
634                        &rte_flow_item_eth_mask.dst.addr_bytes,
635                        sizeof(eth_mask.dst.addr_bytes));
636         }
637         item.spec = &eth;
638         item.mask = &eth_mask;
639         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
640                                              MLX5_MODIFICATION_TYPE_SET, error);
641 }
642
643 /**
644  * Convert modify-header set VLAN VID action to DV specification.
645  *
646  * @param[in,out] resource
647  *   Pointer to the modify-header resource.
648  * @param[in] action
649  *   Pointer to action specification.
650  * @param[out] error
651  *   Pointer to the error structure.
652  *
653  * @return
654  *   0 on success, a negative errno value otherwise and rte_errno is set.
655  */
656 static int
657 flow_dv_convert_action_modify_vlan_vid
658                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
659                          const struct rte_flow_action *action,
660                          struct rte_flow_error *error)
661 {
662         const struct rte_flow_action_of_set_vlan_vid *conf =
663                 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
664         int i = resource->actions_num;
665         struct mlx5_modification_cmd *actions = resource->actions;
666         struct field_modify_info *field = modify_vlan_out_first_vid;
667
668         if (i >= MLX5_MAX_MODIFY_NUM)
669                 return rte_flow_error_set(error, EINVAL,
670                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
671                          "too many items to modify");
672         actions[i] = (struct mlx5_modification_cmd) {
673                 .action_type = MLX5_MODIFICATION_TYPE_SET,
674                 .field = field->id,
675                 .length = field->size,
676                 .offset = field->offset,
677         };
678         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
679         actions[i].data1 = conf->vlan_vid;
680         actions[i].data1 = actions[i].data1 << 16;
681         resource->actions_num = ++i;
682         return 0;
683 }
684
685 /**
686  * Convert modify-header set TP action to DV specification.
687  *
688  * @param[in,out] resource
689  *   Pointer to the modify-header resource.
690  * @param[in] action
691  *   Pointer to action specification.
692  * @param[in] items
693  *   Pointer to rte_flow_item objects list.
694  * @param[in] attr
695  *   Pointer to flow attributes structure.
696  * @param[in] dev_flow
697  *   Pointer to the sub flow.
698  * @param[in] tunnel_decap
699  *   Whether action is after tunnel decapsulation.
700  * @param[out] error
701  *   Pointer to the error structure.
702  *
703  * @return
704  *   0 on success, a negative errno value otherwise and rte_errno is set.
705  */
706 static int
707 flow_dv_convert_action_modify_tp
708                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
709                          const struct rte_flow_action *action,
710                          const struct rte_flow_item *items,
711                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
712                          bool tunnel_decap, struct rte_flow_error *error)
713 {
714         const struct rte_flow_action_set_tp *conf =
715                 (const struct rte_flow_action_set_tp *)(action->conf);
716         struct rte_flow_item item;
717         struct rte_flow_item_udp udp;
718         struct rte_flow_item_udp udp_mask;
719         struct rte_flow_item_tcp tcp;
720         struct rte_flow_item_tcp tcp_mask;
721         struct field_modify_info *field;
722
723         if (!attr->valid)
724                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
725         if (attr->udp) {
726                 memset(&udp, 0, sizeof(udp));
727                 memset(&udp_mask, 0, sizeof(udp_mask));
728                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
729                         udp.hdr.src_port = conf->port;
730                         udp_mask.hdr.src_port =
731                                         rte_flow_item_udp_mask.hdr.src_port;
732                 } else {
733                         udp.hdr.dst_port = conf->port;
734                         udp_mask.hdr.dst_port =
735                                         rte_flow_item_udp_mask.hdr.dst_port;
736                 }
737                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
738                 item.spec = &udp;
739                 item.mask = &udp_mask;
740                 field = modify_udp;
741         } else {
742                 MLX5_ASSERT(attr->tcp);
743                 memset(&tcp, 0, sizeof(tcp));
744                 memset(&tcp_mask, 0, sizeof(tcp_mask));
745                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
746                         tcp.hdr.src_port = conf->port;
747                         tcp_mask.hdr.src_port =
748                                         rte_flow_item_tcp_mask.hdr.src_port;
749                 } else {
750                         tcp.hdr.dst_port = conf->port;
751                         tcp_mask.hdr.dst_port =
752                                         rte_flow_item_tcp_mask.hdr.dst_port;
753                 }
754                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
755                 item.spec = &tcp;
756                 item.mask = &tcp_mask;
757                 field = modify_tcp;
758         }
759         return flow_dv_convert_modify_action(&item, field, NULL, resource,
760                                              MLX5_MODIFICATION_TYPE_SET, error);
761 }
762
763 /**
764  * Convert modify-header set TTL action to DV specification.
765  *
766  * @param[in,out] resource
767  *   Pointer to the modify-header resource.
768  * @param[in] action
769  *   Pointer to action specification.
770  * @param[in] items
771  *   Pointer to rte_flow_item objects list.
772  * @param[in] attr
773  *   Pointer to flow attributes structure.
774  * @param[in] dev_flow
775  *   Pointer to the sub flow.
776  * @param[in] tunnel_decap
777  *   Whether action is after tunnel decapsulation.
778  * @param[out] error
779  *   Pointer to the error structure.
780  *
781  * @return
782  *   0 on success, a negative errno value otherwise and rte_errno is set.
783  */
784 static int
785 flow_dv_convert_action_modify_ttl
786                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
787                          const struct rte_flow_action *action,
788                          const struct rte_flow_item *items,
789                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
790                          bool tunnel_decap, struct rte_flow_error *error)
791 {
792         const struct rte_flow_action_set_ttl *conf =
793                 (const struct rte_flow_action_set_ttl *)(action->conf);
794         struct rte_flow_item item;
795         struct rte_flow_item_ipv4 ipv4;
796         struct rte_flow_item_ipv4 ipv4_mask;
797         struct rte_flow_item_ipv6 ipv6;
798         struct rte_flow_item_ipv6 ipv6_mask;
799         struct field_modify_info *field;
800
801         if (!attr->valid)
802                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
803         if (attr->ipv4) {
804                 memset(&ipv4, 0, sizeof(ipv4));
805                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
806                 ipv4.hdr.time_to_live = conf->ttl_value;
807                 ipv4_mask.hdr.time_to_live = 0xFF;
808                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
809                 item.spec = &ipv4;
810                 item.mask = &ipv4_mask;
811                 field = modify_ipv4;
812         } else {
813                 MLX5_ASSERT(attr->ipv6);
814                 memset(&ipv6, 0, sizeof(ipv6));
815                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
816                 ipv6.hdr.hop_limits = conf->ttl_value;
817                 ipv6_mask.hdr.hop_limits = 0xFF;
818                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
819                 item.spec = &ipv6;
820                 item.mask = &ipv6_mask;
821                 field = modify_ipv6;
822         }
823         return flow_dv_convert_modify_action(&item, field, NULL, resource,
824                                              MLX5_MODIFICATION_TYPE_SET, error);
825 }
826
827 /**
828  * Convert modify-header decrement TTL action to DV specification.
829  *
830  * @param[in,out] resource
831  *   Pointer to the modify-header resource.
832  * @param[in] action
833  *   Pointer to action specification.
834  * @param[in] items
835  *   Pointer to rte_flow_item objects list.
836  * @param[in] attr
837  *   Pointer to flow attributes structure.
838  * @param[in] dev_flow
839  *   Pointer to the sub flow.
840  * @param[in] tunnel_decap
841  *   Whether action is after tunnel decapsulation.
842  * @param[out] error
843  *   Pointer to the error structure.
844  *
845  * @return
846  *   0 on success, a negative errno value otherwise and rte_errno is set.
847  */
848 static int
849 flow_dv_convert_action_modify_dec_ttl
850                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
851                          const struct rte_flow_item *items,
852                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
853                          bool tunnel_decap, struct rte_flow_error *error)
854 {
855         struct rte_flow_item item;
856         struct rte_flow_item_ipv4 ipv4;
857         struct rte_flow_item_ipv4 ipv4_mask;
858         struct rte_flow_item_ipv6 ipv6;
859         struct rte_flow_item_ipv6 ipv6_mask;
860         struct field_modify_info *field;
861
862         if (!attr->valid)
863                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
864         if (attr->ipv4) {
865                 memset(&ipv4, 0, sizeof(ipv4));
866                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
867                 ipv4.hdr.time_to_live = 0xFF;
868                 ipv4_mask.hdr.time_to_live = 0xFF;
869                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
870                 item.spec = &ipv4;
871                 item.mask = &ipv4_mask;
872                 field = modify_ipv4;
873         } else {
874                 MLX5_ASSERT(attr->ipv6);
875                 memset(&ipv6, 0, sizeof(ipv6));
876                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
877                 ipv6.hdr.hop_limits = 0xFF;
878                 ipv6_mask.hdr.hop_limits = 0xFF;
879                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
880                 item.spec = &ipv6;
881                 item.mask = &ipv6_mask;
882                 field = modify_ipv6;
883         }
884         return flow_dv_convert_modify_action(&item, field, NULL, resource,
885                                              MLX5_MODIFICATION_TYPE_ADD, error);
886 }
887
888 /**
889  * Convert modify-header increment/decrement TCP Sequence number
890  * to DV specification.
891  *
892  * @param[in,out] resource
893  *   Pointer to the modify-header resource.
894  * @param[in] action
895  *   Pointer to action specification.
896  * @param[out] error
897  *   Pointer to the error structure.
898  *
899  * @return
900  *   0 on success, a negative errno value otherwise and rte_errno is set.
901  */
902 static int
903 flow_dv_convert_action_modify_tcp_seq
904                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
905                          const struct rte_flow_action *action,
906                          struct rte_flow_error *error)
907 {
908         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
909         uint64_t value = rte_be_to_cpu_32(*conf);
910         struct rte_flow_item item;
911         struct rte_flow_item_tcp tcp;
912         struct rte_flow_item_tcp tcp_mask;
913
914         memset(&tcp, 0, sizeof(tcp));
915         memset(&tcp_mask, 0, sizeof(tcp_mask));
916         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
917                 /*
918                  * The HW has no decrement operation, only increment operation.
919                  * To simulate decrement X from Y using increment operation
920                  * we need to add UINT32_MAX X times to Y.
921                  * Each adding of UINT32_MAX decrements Y by 1.
922                  */
923                 value *= UINT32_MAX;
924         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
925         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
926         item.type = RTE_FLOW_ITEM_TYPE_TCP;
927         item.spec = &tcp;
928         item.mask = &tcp_mask;
929         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
930                                              MLX5_MODIFICATION_TYPE_ADD, error);
931 }
932
933 /**
934  * Convert modify-header increment/decrement TCP Acknowledgment number
935  * to DV specification.
936  *
937  * @param[in,out] resource
938  *   Pointer to the modify-header resource.
939  * @param[in] action
940  *   Pointer to action specification.
941  * @param[out] error
942  *   Pointer to the error structure.
943  *
944  * @return
945  *   0 on success, a negative errno value otherwise and rte_errno is set.
946  */
947 static int
948 flow_dv_convert_action_modify_tcp_ack
949                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
950                          const struct rte_flow_action *action,
951                          struct rte_flow_error *error)
952 {
953         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
954         uint64_t value = rte_be_to_cpu_32(*conf);
955         struct rte_flow_item item;
956         struct rte_flow_item_tcp tcp;
957         struct rte_flow_item_tcp tcp_mask;
958
959         memset(&tcp, 0, sizeof(tcp));
960         memset(&tcp_mask, 0, sizeof(tcp_mask));
961         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
962                 /*
963                  * The HW has no decrement operation, only increment operation.
964                  * To simulate decrement X from Y using increment operation
965                  * we need to add UINT32_MAX X times to Y.
966                  * Each adding of UINT32_MAX decrements Y by 1.
967                  */
968                 value *= UINT32_MAX;
969         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
970         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
971         item.type = RTE_FLOW_ITEM_TYPE_TCP;
972         item.spec = &tcp;
973         item.mask = &tcp_mask;
974         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
975                                              MLX5_MODIFICATION_TYPE_ADD, error);
976 }
977
/*
 * Map software metadata register identifiers (enum modify_reg) to the
 * modify-header hardware field identifiers used in modification
 * commands. Indexed by register id; REG_NON maps to "no field".
 */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
991
/**
 * Convert register set to DV specification.
 *
 * Appends one SET modification command writing the configured data to
 * the metadata register selected by the action, directly into the
 * resource's command array (no item/field conversion involved).
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t i = resource->actions_num;

	/* The command array is fixed-size; refuse to overflow it. */
	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	MLX5_ASSERT(conf->id != REG_NON);
	MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = reg_to_field[conf->id],
		.offset = conf->offset,
		.length = conf->length,
	};
	/* Hardware expects the command words in big-endian layout. */
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = rte_cpu_to_be_32(conf->data);
	++i;
	resource->actions_num = i;
	return 0;
}
1033
1034 /**
1035  * Convert SET_TAG action to DV specification.
1036  *
1037  * @param[in] dev
1038  *   Pointer to the rte_eth_dev structure.
1039  * @param[in,out] resource
1040  *   Pointer to the modify-header resource.
1041  * @param[in] conf
1042  *   Pointer to action specification.
1043  * @param[out] error
1044  *   Pointer to the error structure.
1045  *
1046  * @return
1047  *   0 on success, a negative errno value otherwise and rte_errno is set.
1048  */
1049 static int
1050 flow_dv_convert_action_set_tag
1051                         (struct rte_eth_dev *dev,
1052                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1053                          const struct rte_flow_action_set_tag *conf,
1054                          struct rte_flow_error *error)
1055 {
1056         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1057         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1058         struct rte_flow_item item = {
1059                 .spec = &data,
1060                 .mask = &mask,
1061         };
1062         struct field_modify_info reg_c_x[] = {
1063                 [1] = {0, 0, 0},
1064         };
1065         enum mlx5_modification_field reg_type;
1066         int ret;
1067
1068         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1069         if (ret < 0)
1070                 return ret;
1071         MLX5_ASSERT(ret != REG_NON);
1072         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1073         reg_type = reg_to_field[ret];
1074         MLX5_ASSERT(reg_type > 0);
1075         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1076         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1077                                              MLX5_MODIFICATION_TYPE_SET, error);
1078 }
1079
/**
 * Convert internal COPY_REG action to DV specification.
 *
 * Appends a COPY modification command moving the content of one
 * metadata register to another. When reg_c[0] is involved, only the
 * bits reported as available in dv_regc0_mask take part in the copy
 * and the destination bit offset is adjusted accordingly.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	/* No spec: only the mask limits which bits are copied. */
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * Mask is ignoring the endianness, because
			 * there is no conversion in datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
1155
/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Only the bits advertised as usable for MARK may be written. */
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	/* Resolve the metadata register carrying the MARK value. */
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		/*
		 * reg_c[0] is shared: shift data/mask into the sub-field
		 * reported by dv_regc0_mask. The swap to CPU order, shift,
		 * and swap back keep the stored values big-endian.
		 */
		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1212
1213 /**
1214  * Get metadata register index for specified steering domain.
1215  *
1216  * @param[in] dev
1217  *   Pointer to the rte_eth_dev structure.
1218  * @param[in] attr
1219  *   Attributes of flow to determine steering domain.
1220  * @param[out] error
1221  *   Pointer to the error structure.
1222  *
1223  * @return
1224  *   positive index on success, a negative errno value otherwise
1225  *   and rte_errno is set.
1226  */
1227 static enum modify_reg
1228 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1229                          const struct rte_flow_attr *attr,
1230                          struct rte_flow_error *error)
1231 {
1232         int reg =
1233                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1234                                           MLX5_METADATA_FDB :
1235                                             attr->egress ?
1236                                             MLX5_METADATA_TX :
1237                                             MLX5_METADATA_RX, 0, error);
1238         if (reg < 0)
1239                 return rte_flow_error_set(error,
1240                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1241                                           NULL, "unavailable "
1242                                           "metadata register");
1243         return reg;
1244 }
1245
/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	/* Data and mask are handled in big-endian form from here on. */
	uint32_t mask = rte_cpu_to_be_32(conf->mask);
	uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	/* Pick the metadata register for this steering domain. */
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		/*
		 * reg_c[0] is shared: shift data/mask into the sub-field
		 * reported by dv_regc0_mask, preserving big-endian storage.
		 */
		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1299
1300 /**
1301  * Convert modify-header set IPv4 DSCP action to DV specification.
1302  *
1303  * @param[in,out] resource
1304  *   Pointer to the modify-header resource.
1305  * @param[in] action
1306  *   Pointer to action specification.
1307  * @param[out] error
1308  *   Pointer to the error structure.
1309  *
1310  * @return
1311  *   0 on success, a negative errno value otherwise and rte_errno is set.
1312  */
1313 static int
1314 flow_dv_convert_action_modify_ipv4_dscp
1315                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1316                          const struct rte_flow_action *action,
1317                          struct rte_flow_error *error)
1318 {
1319         const struct rte_flow_action_set_dscp *conf =
1320                 (const struct rte_flow_action_set_dscp *)(action->conf);
1321         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1322         struct rte_flow_item_ipv4 ipv4;
1323         struct rte_flow_item_ipv4 ipv4_mask;
1324
1325         memset(&ipv4, 0, sizeof(ipv4));
1326         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1327         ipv4.hdr.type_of_service = conf->dscp;
1328         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1329         item.spec = &ipv4;
1330         item.mask = &ipv4_mask;
1331         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1332                                              MLX5_MODIFICATION_TYPE_SET, error);
1333 }
1334
1335 /**
1336  * Convert modify-header set IPv6 DSCP action to DV specification.
1337  *
1338  * @param[in,out] resource
1339  *   Pointer to the modify-header resource.
1340  * @param[in] action
1341  *   Pointer to action specification.
1342  * @param[out] error
1343  *   Pointer to the error structure.
1344  *
1345  * @return
1346  *   0 on success, a negative errno value otherwise and rte_errno is set.
1347  */
1348 static int
1349 flow_dv_convert_action_modify_ipv6_dscp
1350                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1351                          const struct rte_flow_action *action,
1352                          struct rte_flow_error *error)
1353 {
1354         const struct rte_flow_action_set_dscp *conf =
1355                 (const struct rte_flow_action_set_dscp *)(action->conf);
1356         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1357         struct rte_flow_item_ipv6 ipv6;
1358         struct rte_flow_item_ipv6 ipv6_mask;
1359
1360         memset(&ipv6, 0, sizeof(ipv6));
1361         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1362         /*
1363          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1364          * rdma-core only accept the DSCP bits byte aligned start from
1365          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1366          * bits in IPv6 case as rdma-core requires byte aligned value.
1367          */
1368         ipv6.hdr.vtc_flow = conf->dscp;
1369         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1370         item.spec = &ipv6;
1371         item.mask = &ipv6_mask;
1372         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1373                                              MLX5_MODIFICATION_TYPE_SET, error);
1374 }
1375
1376 static int
1377 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1378                            enum rte_flow_field_id field)
1379 {
1380         switch (field) {
1381         case RTE_FLOW_FIELD_START:
1382                 return 32;
1383         case RTE_FLOW_FIELD_MAC_DST:
1384         case RTE_FLOW_FIELD_MAC_SRC:
1385                 return 48;
1386         case RTE_FLOW_FIELD_VLAN_TYPE:
1387                 return 16;
1388         case RTE_FLOW_FIELD_VLAN_ID:
1389                 return 12;
1390         case RTE_FLOW_FIELD_MAC_TYPE:
1391                 return 16;
1392         case RTE_FLOW_FIELD_IPV4_DSCP:
1393                 return 6;
1394         case RTE_FLOW_FIELD_IPV4_TTL:
1395                 return 8;
1396         case RTE_FLOW_FIELD_IPV4_SRC:
1397         case RTE_FLOW_FIELD_IPV4_DST:
1398                 return 32;
1399         case RTE_FLOW_FIELD_IPV6_DSCP:
1400                 return 6;
1401         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1402                 return 8;
1403         case RTE_FLOW_FIELD_IPV6_SRC:
1404         case RTE_FLOW_FIELD_IPV6_DST:
1405                 return 128;
1406         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1407         case RTE_FLOW_FIELD_TCP_PORT_DST:
1408                 return 16;
1409         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1410         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1411                 return 32;
1412         case RTE_FLOW_FIELD_TCP_FLAGS:
1413                 return 9;
1414         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1415         case RTE_FLOW_FIELD_UDP_PORT_DST:
1416                 return 16;
1417         case RTE_FLOW_FIELD_VXLAN_VNI:
1418         case RTE_FLOW_FIELD_GENEVE_VNI:
1419                 return 24;
1420         case RTE_FLOW_FIELD_GTP_TEID:
1421         case RTE_FLOW_FIELD_TAG:
1422                 return 32;
1423         case RTE_FLOW_FIELD_MARK:
1424                 return 24;
1425         case RTE_FLOW_FIELD_META:
1426                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1427                         return 16;
1428                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1429                         return 32;
1430                 else
1431                         return 0;
1432         case RTE_FLOW_FIELD_POINTER:
1433         case RTE_FLOW_FIELD_VALUE:
1434                 return 64;
1435         default:
1436                 MLX5_ASSERT(false);
1437         }
1438         return 0;
1439 }
1440
1441 static void
1442 mlx5_flow_field_id_to_modify_info
1443                 (const struct rte_flow_action_modify_data *data,
1444                  struct field_modify_info *info,
1445                  uint32_t *mask, uint32_t *value,
1446                  uint32_t width, uint32_t dst_width,
1447                  struct rte_eth_dev *dev,
1448                  const struct rte_flow_attr *attr,
1449                  struct rte_flow_error *error)
1450 {
1451         struct mlx5_priv *priv = dev->data->dev_private;
1452         struct mlx5_dev_config *config = &priv->config;
1453         uint32_t idx = 0;
1454         uint32_t off = 0;
1455         uint64_t val = 0;
1456         switch (data->field) {
1457         case RTE_FLOW_FIELD_START:
1458                 /* not supported yet */
1459                 MLX5_ASSERT(false);
1460                 break;
1461         case RTE_FLOW_FIELD_MAC_DST:
1462                 off = data->offset > 16 ? data->offset - 16 : 0;
1463                 if (mask) {
1464                         if (data->offset < 16) {
1465                                 info[idx] = (struct field_modify_info){2, 0,
1466                                                 MLX5_MODI_OUT_DMAC_15_0};
1467                                 if (width < 16) {
1468                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1469                                                                  (16 - width));
1470                                         width = 0;
1471                                 } else {
1472                                         mask[idx] = RTE_BE16(0xffff);
1473                                         width -= 16;
1474                                 }
1475                                 if (!width)
1476                                         break;
1477                                 ++idx;
1478                         }
1479                         info[idx] = (struct field_modify_info){4, 4 * idx,
1480                                                 MLX5_MODI_OUT_DMAC_47_16};
1481                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1482                                                       (32 - width)) << off);
1483                 } else {
1484                         if (data->offset < 16)
1485                                 info[idx++] = (struct field_modify_info){2, 0,
1486                                                 MLX5_MODI_OUT_DMAC_15_0};
1487                         info[idx] = (struct field_modify_info){4, off,
1488                                                 MLX5_MODI_OUT_DMAC_47_16};
1489                 }
1490                 break;
1491         case RTE_FLOW_FIELD_MAC_SRC:
1492                 off = data->offset > 16 ? data->offset - 16 : 0;
1493                 if (mask) {
1494                         if (data->offset < 16) {
1495                                 info[idx] = (struct field_modify_info){2, 0,
1496                                                 MLX5_MODI_OUT_SMAC_15_0};
1497                                 if (width < 16) {
1498                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1499                                                                  (16 - width));
1500                                         width = 0;
1501                                 } else {
1502                                         mask[idx] = RTE_BE16(0xffff);
1503                                         width -= 16;
1504                                 }
1505                                 if (!width)
1506                                         break;
1507                                 ++idx;
1508                         }
1509                         info[idx] = (struct field_modify_info){4, 4 * idx,
1510                                                 MLX5_MODI_OUT_SMAC_47_16};
1511                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1512                                                       (32 - width)) << off);
1513                 } else {
1514                         if (data->offset < 16)
1515                                 info[idx++] = (struct field_modify_info){2, 0,
1516                                                 MLX5_MODI_OUT_SMAC_15_0};
1517                         info[idx] = (struct field_modify_info){4, off,
1518                                                 MLX5_MODI_OUT_SMAC_47_16};
1519                 }
1520                 break;
1521         case RTE_FLOW_FIELD_VLAN_TYPE:
1522                 /* not supported yet */
1523                 break;
1524         case RTE_FLOW_FIELD_VLAN_ID:
1525                 info[idx] = (struct field_modify_info){2, 0,
1526                                         MLX5_MODI_OUT_FIRST_VID};
1527                 if (mask)
1528                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1529                 break;
1530         case RTE_FLOW_FIELD_MAC_TYPE:
1531                 info[idx] = (struct field_modify_info){2, 0,
1532                                         MLX5_MODI_OUT_ETHERTYPE};
1533                 if (mask)
1534                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1535                 break;
1536         case RTE_FLOW_FIELD_IPV4_DSCP:
1537                 info[idx] = (struct field_modify_info){1, 0,
1538                                         MLX5_MODI_OUT_IP_DSCP};
1539                 if (mask)
1540                         mask[idx] = 0x3f >> (6 - width);
1541                 break;
1542         case RTE_FLOW_FIELD_IPV4_TTL:
1543                 info[idx] = (struct field_modify_info){1, 0,
1544                                         MLX5_MODI_OUT_IPV4_TTL};
1545                 if (mask)
1546                         mask[idx] = 0xff >> (8 - width);
1547                 break;
1548         case RTE_FLOW_FIELD_IPV4_SRC:
1549                 info[idx] = (struct field_modify_info){4, 0,
1550                                         MLX5_MODI_OUT_SIPV4};
1551                 if (mask)
1552                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1553                                                      (32 - width));
1554                 break;
1555         case RTE_FLOW_FIELD_IPV4_DST:
1556                 info[idx] = (struct field_modify_info){4, 0,
1557                                         MLX5_MODI_OUT_DIPV4};
1558                 if (mask)
1559                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1560                                                      (32 - width));
1561                 break;
1562         case RTE_FLOW_FIELD_IPV6_DSCP:
1563                 info[idx] = (struct field_modify_info){1, 0,
1564                                         MLX5_MODI_OUT_IP_DSCP};
1565                 if (mask)
1566                         mask[idx] = 0x3f >> (6 - width);
1567                 break;
1568         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1569                 info[idx] = (struct field_modify_info){1, 0,
1570                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1571                 if (mask)
1572                         mask[idx] = 0xff >> (8 - width);
1573                 break;
1574         case RTE_FLOW_FIELD_IPV6_SRC:
1575                 if (mask) {
1576                         if (data->offset < 32) {
1577                                 info[idx] = (struct field_modify_info){4,
1578                                                 4 * idx,
1579                                                 MLX5_MODI_OUT_SIPV6_31_0};
1580                                 if (width < 32) {
1581                                         mask[idx] =
1582                                                 rte_cpu_to_be_32(0xffffffff >>
1583                                                                  (32 - width));
1584                                         width = 0;
1585                                 } else {
1586                                         mask[idx] = RTE_BE32(0xffffffff);
1587                                         width -= 32;
1588                                 }
1589                                 if (!width)
1590                                         break;
1591                                 ++idx;
1592                         }
1593                         if (data->offset < 64) {
1594                                 info[idx] = (struct field_modify_info){4,
1595                                                 4 * idx,
1596                                                 MLX5_MODI_OUT_SIPV6_63_32};
1597                                 if (width < 32) {
1598                                         mask[idx] =
1599                                                 rte_cpu_to_be_32(0xffffffff >>
1600                                                                  (32 - width));
1601                                         width = 0;
1602                                 } else {
1603                                         mask[idx] = RTE_BE32(0xffffffff);
1604                                         width -= 32;
1605                                 }
1606                                 if (!width)
1607                                         break;
1608                                 ++idx;
1609                         }
1610                         if (data->offset < 96) {
1611                                 info[idx] = (struct field_modify_info){4,
1612                                                 4 * idx,
1613                                                 MLX5_MODI_OUT_SIPV6_95_64};
1614                                 if (width < 32) {
1615                                         mask[idx] =
1616                                                 rte_cpu_to_be_32(0xffffffff >>
1617                                                                  (32 - width));
1618                                         width = 0;
1619                                 } else {
1620                                         mask[idx] = RTE_BE32(0xffffffff);
1621                                         width -= 32;
1622                                 }
1623                                 if (!width)
1624                                         break;
1625                                 ++idx;
1626                         }
1627                         info[idx] = (struct field_modify_info){4, 4 * idx,
1628                                                 MLX5_MODI_OUT_SIPV6_127_96};
1629                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1630                                                      (32 - width));
1631                 } else {
1632                         if (data->offset < 32)
1633                                 info[idx++] = (struct field_modify_info){4, 0,
1634                                                 MLX5_MODI_OUT_SIPV6_31_0};
1635                         if (data->offset < 64)
1636                                 info[idx++] = (struct field_modify_info){4, 0,
1637                                                 MLX5_MODI_OUT_SIPV6_63_32};
1638                         if (data->offset < 96)
1639                                 info[idx++] = (struct field_modify_info){4, 0,
1640                                                 MLX5_MODI_OUT_SIPV6_95_64};
1641                         if (data->offset < 128)
1642                                 info[idx++] = (struct field_modify_info){4, 0,
1643                                                 MLX5_MODI_OUT_SIPV6_127_96};
1644                 }
1645                 break;
1646         case RTE_FLOW_FIELD_IPV6_DST:
1647                 if (mask) {
1648                         if (data->offset < 32) {
1649                                 info[idx] = (struct field_modify_info){4,
1650                                                 4 * idx,
1651                                                 MLX5_MODI_OUT_DIPV6_31_0};
1652                                 if (width < 32) {
1653                                         mask[idx] =
1654                                                 rte_cpu_to_be_32(0xffffffff >>
1655                                                                  (32 - width));
1656                                         width = 0;
1657                                 } else {
1658                                         mask[idx] = RTE_BE32(0xffffffff);
1659                                         width -= 32;
1660                                 }
1661                                 if (!width)
1662                                         break;
1663                                 ++idx;
1664                         }
1665                         if (data->offset < 64) {
1666                                 info[idx] = (struct field_modify_info){4,
1667                                                 4 * idx,
1668                                                 MLX5_MODI_OUT_DIPV6_63_32};
1669                                 if (width < 32) {
1670                                         mask[idx] =
1671                                                 rte_cpu_to_be_32(0xffffffff >>
1672                                                                  (32 - width));
1673                                         width = 0;
1674                                 } else {
1675                                         mask[idx] = RTE_BE32(0xffffffff);
1676                                         width -= 32;
1677                                 }
1678                                 if (!width)
1679                                         break;
1680                                 ++idx;
1681                         }
1682                         if (data->offset < 96) {
1683                                 info[idx] = (struct field_modify_info){4,
1684                                                 4 * idx,
1685                                                 MLX5_MODI_OUT_DIPV6_95_64};
1686                                 if (width < 32) {
1687                                         mask[idx] =
1688                                                 rte_cpu_to_be_32(0xffffffff >>
1689                                                                  (32 - width));
1690                                         width = 0;
1691                                 } else {
1692                                         mask[idx] = RTE_BE32(0xffffffff);
1693                                         width -= 32;
1694                                 }
1695                                 if (!width)
1696                                         break;
1697                                 ++idx;
1698                         }
1699                         info[idx] = (struct field_modify_info){4, 4 * idx,
1700                                                 MLX5_MODI_OUT_DIPV6_127_96};
1701                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1702                                                      (32 - width));
1703                 } else {
1704                         if (data->offset < 32)
1705                                 info[idx++] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_31_0};
1707                         if (data->offset < 64)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_63_32};
1710                         if (data->offset < 96)
1711                                 info[idx++] = (struct field_modify_info){4, 0,
1712                                                 MLX5_MODI_OUT_DIPV6_95_64};
1713                         if (data->offset < 128)
1714                                 info[idx++] = (struct field_modify_info){4, 0,
1715                                                 MLX5_MODI_OUT_DIPV6_127_96};
1716                 }
1717                 break;
1718         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1719                 info[idx] = (struct field_modify_info){2, 0,
1720                                         MLX5_MODI_OUT_TCP_SPORT};
1721                 if (mask)
1722                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1723                 break;
1724         case RTE_FLOW_FIELD_TCP_PORT_DST:
1725                 info[idx] = (struct field_modify_info){2, 0,
1726                                         MLX5_MODI_OUT_TCP_DPORT};
1727                 if (mask)
1728                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1729                 break;
1730         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1731                 info[idx] = (struct field_modify_info){4, 0,
1732                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1733                 if (mask)
1734                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1735                                                      (32 - width));
1736                 break;
1737         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1738                 info[idx] = (struct field_modify_info){4, 0,
1739                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1740                 if (mask)
1741                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1742                                                      (32 - width));
1743                 break;
1744         case RTE_FLOW_FIELD_TCP_FLAGS:
1745                 info[idx] = (struct field_modify_info){2, 0,
1746                                         MLX5_MODI_OUT_TCP_FLAGS};
1747                 if (mask)
1748                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1749                 break;
1750         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1751                 info[idx] = (struct field_modify_info){2, 0,
1752                                         MLX5_MODI_OUT_UDP_SPORT};
1753                 if (mask)
1754                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1755                 break;
1756         case RTE_FLOW_FIELD_UDP_PORT_DST:
1757                 info[idx] = (struct field_modify_info){2, 0,
1758                                         MLX5_MODI_OUT_UDP_DPORT};
1759                 if (mask)
1760                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1761                 break;
1762         case RTE_FLOW_FIELD_VXLAN_VNI:
1763                 /* not supported yet */
1764                 break;
1765         case RTE_FLOW_FIELD_GENEVE_VNI:
1766                 /* not supported yet*/
1767                 break;
1768         case RTE_FLOW_FIELD_GTP_TEID:
1769                 info[idx] = (struct field_modify_info){4, 0,
1770                                         MLX5_MODI_GTP_TEID};
1771                 if (mask)
1772                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1773                                                      (32 - width));
1774                 break;
1775         case RTE_FLOW_FIELD_TAG:
1776                 {
1777                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1778                                                    data->level, error);
1779                         if (reg < 0)
1780                                 return;
1781                         MLX5_ASSERT(reg != REG_NON);
1782                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1783                         info[idx] = (struct field_modify_info){4, 0,
1784                                                 reg_to_field[reg]};
1785                         if (mask)
1786                                 mask[idx] =
1787                                         rte_cpu_to_be_32(0xffffffff >>
1788                                                          (32 - width));
1789                 }
1790                 break;
1791         case RTE_FLOW_FIELD_MARK:
1792                 {
1793                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1794                                                        0, error);
1795                         if (reg < 0)
1796                                 return;
1797                         MLX5_ASSERT(reg != REG_NON);
1798                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1799                         info[idx] = (struct field_modify_info){4, 0,
1800                                                 reg_to_field[reg]};
1801                         if (mask)
1802                                 mask[idx] =
1803                                         rte_cpu_to_be_32(0xffffffff >>
1804                                                          (32 - width));
1805                 }
1806                 break;
1807         case RTE_FLOW_FIELD_META:
1808                 {
1809                         unsigned int xmeta = config->dv_xmeta_en;
1810                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1811                         if (reg < 0)
1812                                 return;
1813                         MLX5_ASSERT(reg != REG_NON);
1814                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1815                         if (xmeta == MLX5_XMETA_MODE_META16) {
1816                                 info[idx] = (struct field_modify_info){2, 0,
1817                                                         reg_to_field[reg]};
1818                                 if (mask)
1819                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1820                                                                 (16 - width));
1821                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1822                                 info[idx] = (struct field_modify_info){4, 0,
1823                                                         reg_to_field[reg]};
1824                                 if (mask)
1825                                         mask[idx] =
1826                                                 rte_cpu_to_be_32(0xffffffff >>
1827                                                                 (32 - width));
1828                         } else {
1829                                 MLX5_ASSERT(false);
1830                         }
1831                 }
1832                 break;
1833         case RTE_FLOW_FIELD_POINTER:
1834         case RTE_FLOW_FIELD_VALUE:
1835                 if (data->field == RTE_FLOW_FIELD_POINTER)
1836                         memcpy(&val, (void *)(uintptr_t)data->value,
1837                                sizeof(uint64_t));
1838                 else
1839                         val = data->value;
1840                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1841                         if (mask[idx]) {
1842                                 if (dst_width == 48) {
1843                                         /*special case for MAC addresses */
1844                                         value[idx] = rte_cpu_to_be_16(val);
1845                                         val >>= 16;
1846                                         dst_width -= 16;
1847                                 } else if (dst_width > 16) {
1848                                         value[idx] = rte_cpu_to_be_32(val);
1849                                         val >>= 32;
1850                                 } else if (dst_width > 8) {
1851                                         value[idx] = rte_cpu_to_be_16(val);
1852                                         val >>= 16;
1853                                 } else {
1854                                         value[idx] = (uint8_t)val;
1855                                         val >>= 8;
1856                                 }
1857                                 if (!val)
1858                                         break;
1859                         }
1860                 }
1861                 break;
1862         default:
1863                 MLX5_ASSERT(false);
1864                 break;
1865         }
1866 }
1867
1868 /**
1869  * Convert modify_field action to DV specification.
1870  *
1871  * @param[in] dev
1872  *   Pointer to the rte_eth_dev structure.
1873  * @param[in,out] resource
1874  *   Pointer to the modify-header resource.
1875  * @param[in] action
1876  *   Pointer to action specification.
1877  * @param[in] attr
1878  *   Attributes of flow that includes this action.
1879  * @param[out] error
1880  *   Pointer to the error structure.
1881  *
1882  * @return
1883  *   0 on success, a negative errno value otherwise and rte_errno is set.
1884  */
1885 static int
1886 flow_dv_convert_action_modify_field
1887                         (struct rte_eth_dev *dev,
1888                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1889                          const struct rte_flow_action *action,
1890                          const struct rte_flow_attr *attr,
1891                          struct rte_flow_error *error)
1892 {
1893         struct mlx5_priv *priv = dev->data->dev_private;
1894         struct mlx5_dev_config *config = &priv->config;
1895         const struct rte_flow_action_modify_field *conf =
1896                 (const struct rte_flow_action_modify_field *)(action->conf);
1897         struct rte_flow_item item;
1898         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1899                                                                 {0, 0, 0} };
1900         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1901                                                                 {0, 0, 0} };
1902         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1903         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1904         uint32_t type;
1905         uint32_t dst_width = mlx5_flow_item_field_width(config,
1906                                                         conf->dst.field);
1907
1908         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1909                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1910                 type = MLX5_MODIFICATION_TYPE_SET;
1911                 /** For SET fill the destination field (field) first. */
1912                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1913                         value, conf->width, dst_width, dev, attr, error);
1914                 /** Then copy immediate value from source as per mask. */
1915                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1916                         value, conf->width, dst_width, dev, attr, error);
1917                 item.spec = &value;
1918         } else {
1919                 type = MLX5_MODIFICATION_TYPE_COPY;
1920                 /** For COPY fill the destination field (dcopy) without mask. */
1921                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1922                         value, conf->width, dst_width, dev, attr, error);
1923                 /** Then construct the source field (field) with mask. */
1924                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1925                         value, conf->width, dst_width, dev, attr, error);
1926         }
1927         item.mask = &mask;
1928         return flow_dv_convert_modify_action(&item,
1929                         field, dcopy, resource, type, error);
1930 }
1931
1932 /**
1933  * Validate MARK item.
1934  *
1935  * @param[in] dev
1936  *   Pointer to the rte_eth_dev structure.
1937  * @param[in] item
1938  *   Item specification.
1939  * @param[in] attr
1940  *   Attributes of flow that includes this item.
1941  * @param[out] error
1942  *   Pointer to error structure.
1943  *
1944  * @return
1945  *   0 on success, a negative errno value otherwise and rte_errno is set.
1946  */
1947 static int
1948 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1949                            const struct rte_flow_item *item,
1950                            const struct rte_flow_attr *attr __rte_unused,
1951                            struct rte_flow_error *error)
1952 {
1953         struct mlx5_priv *priv = dev->data->dev_private;
1954         struct mlx5_dev_config *config = &priv->config;
1955         const struct rte_flow_item_mark *spec = item->spec;
1956         const struct rte_flow_item_mark *mask = item->mask;
1957         const struct rte_flow_item_mark nic_mask = {
1958                 .id = priv->sh->dv_mark_mask,
1959         };
1960         int ret;
1961
1962         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1963                 return rte_flow_error_set(error, ENOTSUP,
1964                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1965                                           "extended metadata feature"
1966                                           " isn't enabled");
1967         if (!mlx5_flow_ext_mreg_supported(dev))
1968                 return rte_flow_error_set(error, ENOTSUP,
1969                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1970                                           "extended metadata register"
1971                                           " isn't supported");
1972         if (!nic_mask.id)
1973                 return rte_flow_error_set(error, ENOTSUP,
1974                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1975                                           "extended metadata register"
1976                                           " isn't available");
1977         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1978         if (ret < 0)
1979                 return ret;
1980         if (!spec)
1981                 return rte_flow_error_set(error, EINVAL,
1982                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1983                                           item->spec,
1984                                           "data cannot be empty");
1985         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1986                 return rte_flow_error_set(error, EINVAL,
1987                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1988                                           &spec->id,
1989                                           "mark id exceeds the limit");
1990         if (!mask)
1991                 mask = &nic_mask;
1992         if (!mask->id)
1993                 return rte_flow_error_set(error, EINVAL,
1994                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1995                                         "mask cannot be zero");
1996
1997         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1998                                         (const uint8_t *)&nic_mask,
1999                                         sizeof(struct rte_flow_item_mark),
2000                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2001         if (ret < 0)
2002                 return ret;
2003         return 0;
2004 }
2005
2006 /**
2007  * Validate META item.
2008  *
2009  * @param[in] dev
2010  *   Pointer to the rte_eth_dev structure.
2011  * @param[in] item
2012  *   Item specification.
2013  * @param[in] attr
2014  *   Attributes of flow that includes this item.
2015  * @param[out] error
2016  *   Pointer to error structure.
2017  *
2018  * @return
2019  *   0 on success, a negative errno value otherwise and rte_errno is set.
2020  */
2021 static int
2022 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
2023                            const struct rte_flow_item *item,
2024                            const struct rte_flow_attr *attr,
2025                            struct rte_flow_error *error)
2026 {
2027         struct mlx5_priv *priv = dev->data->dev_private;
2028         struct mlx5_dev_config *config = &priv->config;
2029         const struct rte_flow_item_meta *spec = item->spec;
2030         const struct rte_flow_item_meta *mask = item->mask;
2031         struct rte_flow_item_meta nic_mask = {
2032                 .data = UINT32_MAX
2033         };
2034         int reg;
2035         int ret;
2036
2037         if (!spec)
2038                 return rte_flow_error_set(error, EINVAL,
2039                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2040                                           item->spec,
2041                                           "data cannot be empty");
2042         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2043                 if (!mlx5_flow_ext_mreg_supported(dev))
2044                         return rte_flow_error_set(error, ENOTSUP,
2045                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2046                                           "extended metadata register"
2047                                           " isn't supported");
2048                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2049                 if (reg < 0)
2050                         return reg;
2051                 if (reg == REG_NON)
2052                         return rte_flow_error_set(error, ENOTSUP,
2053                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2054                                         "unavalable extended metadata register");
2055                 if (reg == REG_B)
2056                         return rte_flow_error_set(error, ENOTSUP,
2057                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2058                                           "match on reg_b "
2059                                           "isn't supported");
2060                 if (reg != REG_A)
2061                         nic_mask.data = priv->sh->dv_meta_mask;
2062         } else {
2063                 if (attr->transfer)
2064                         return rte_flow_error_set(error, ENOTSUP,
2065                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2066                                         "extended metadata feature "
2067                                         "should be enabled when "
2068                                         "meta item is requested "
2069                                         "with e-switch mode ");
2070                 if (attr->ingress)
2071                         return rte_flow_error_set(error, ENOTSUP,
2072                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2073                                         "match on metadata for ingress "
2074                                         "is not supported in legacy "
2075                                         "metadata mode");
2076         }
2077         if (!mask)
2078                 mask = &rte_flow_item_meta_mask;
2079         if (!mask->data)
2080                 return rte_flow_error_set(error, EINVAL,
2081                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2082                                         "mask cannot be zero");
2083
2084         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2085                                         (const uint8_t *)&nic_mask,
2086                                         sizeof(struct rte_flow_item_meta),
2087                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2088         return ret;
2089 }
2090
2091 /**
2092  * Validate TAG item.
2093  *
2094  * @param[in] dev
2095  *   Pointer to the rte_eth_dev structure.
2096  * @param[in] item
2097  *   Item specification.
2098  * @param[in] attr
2099  *   Attributes of flow that includes this item.
2100  * @param[out] error
2101  *   Pointer to error structure.
2102  *
2103  * @return
2104  *   0 on success, a negative errno value otherwise and rte_errno is set.
2105  */
2106 static int
2107 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2108                           const struct rte_flow_item *item,
2109                           const struct rte_flow_attr *attr __rte_unused,
2110                           struct rte_flow_error *error)
2111 {
2112         const struct rte_flow_item_tag *spec = item->spec;
2113         const struct rte_flow_item_tag *mask = item->mask;
2114         const struct rte_flow_item_tag nic_mask = {
2115                 .data = RTE_BE32(UINT32_MAX),
2116                 .index = 0xff,
2117         };
2118         int ret;
2119
2120         if (!mlx5_flow_ext_mreg_supported(dev))
2121                 return rte_flow_error_set(error, ENOTSUP,
2122                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2123                                           "extensive metadata register"
2124                                           " isn't supported");
2125         if (!spec)
2126                 return rte_flow_error_set(error, EINVAL,
2127                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2128                                           item->spec,
2129                                           "data cannot be empty");
2130         if (!mask)
2131                 mask = &rte_flow_item_tag_mask;
2132         if (!mask->data)
2133                 return rte_flow_error_set(error, EINVAL,
2134                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2135                                         "mask cannot be zero");
2136
2137         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2138                                         (const uint8_t *)&nic_mask,
2139                                         sizeof(struct rte_flow_item_tag),
2140                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2141         if (ret < 0)
2142                 return ret;
2143         if (mask->index != 0xff)
2144                 return rte_flow_error_set(error, EINVAL,
2145                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2146                                           "partial mask for tag index"
2147                                           " is not supported");
2148         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2149         if (ret < 0)
2150                 return ret;
2151         MLX5_ASSERT(ret != REG_NON);
2152         return 0;
2153 }
2154
2155 /**
2156  * Validate vport item.
2157  *
2158  * @param[in] dev
2159  *   Pointer to the rte_eth_dev structure.
2160  * @param[in] item
2161  *   Item specification.
2162  * @param[in] attr
2163  *   Attributes of flow that includes this item.
2164  * @param[in] item_flags
2165  *   Bit-fields that holds the items detected until now.
2166  * @param[out] error
2167  *   Pointer to error structure.
2168  *
2169  * @return
2170  *   0 on success, a negative errno value otherwise and rte_errno is set.
2171  */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_port_id *spec = item->spec;
	const struct rte_flow_item_port_id *mask = item->mask;
	/* Default mask when none given: match the "id" field in full. */
	const struct rte_flow_item_port_id switch_mask = {
			.id = 0xffffffff,
	};
	struct mlx5_priv *esw_priv;
	struct mlx5_priv *dev_priv;
	int ret;

	/* PORT_ID matching is meaningful only for transfer (E-Switch) rules. */
	if (!attr->transfer)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on port id is valid only"
					  " when transfer flag is enabled");
	/* At most one source-port match per rule. */
	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple source ports are not"
					  " supported");
	if (!mask)
		mask = &switch_mask;
	/* Partial masks on "id" are rejected - full match only. */
	if (mask->id != 0xffffffff)
		return rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					   mask,
					   "no support for partial mask on"
					   " \"id\" field");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&rte_flow_item_port_id_mask,
				 sizeof(struct rte_flow_item_port_id),
				 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret)
		return ret;
	/* No spec - nothing concrete to cross-check against the E-Switch. */
	if (!spec)
		return 0;
	esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
	if (!esw_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "failed to obtain E-Switch info for"
					  " port");
	dev_priv = mlx5_dev_to_eswitch_info(dev);
	if (!dev_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to obtain E-Switch info");
	/* The matched port must belong to the same E-Switch domain. */
	if (esw_priv->domain_id != dev_priv->domain_id)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "cannot match on a port from a"
					  " different E-Switch");
	return 0;
}
2235
2236 /**
2237  * Validate VLAN item.
2238  *
2239  * @param[in] item
2240  *   Item specification.
2241  * @param[in] item_flags
2242  *   Bit-fields that holds the items detected until now.
2243  * @param[in] dev
2244  *   Ethernet device flow is being created on.
2245  * @param[out] error
2246  *   Pointer to error structure.
2247  *
2248  * @return
2249  *   0 on success, a negative errno value otherwise and rte_errno is set.
2250  */
static int
flow_dv_validate_item_vlan(const struct rte_flow_item *item,
			   uint64_t item_flags,
			   struct rte_eth_dev *dev,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(UINT16_MAX),
		.inner_type = RTE_BE16(UINT16_MAX),
		.has_more_vlan = 1,
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	/* L3/L4 flags already seen at the current (inner/outer) level. */
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	/* VLAN flag for the current (inner/outer) level. */
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret)
		return ret;
	/*
	 * An outer TCI mask other than the VID-only default (0x0fff)
	 * needs the virtual-machine workaround check below.
	 */
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * Non-NULL context means we have a virtual machine
			 * and SR-IOV enabled, we have to create VLAN interface
			 * to make hypervisor to setup E-Switch vport
			 * context correctly. We avoid creating the multiple
			 * VLAN interfaces, so we cannot support VLAN tag mask.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	return 0;
}
2309
2310 /*
2311  * GTP flags are contained in 1 byte of the format:
2312  * -------------------------------------------
2313  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2314  * |-----------------------------------------|
2315  * | value | Version | PT | Res | E | S | PN |
2316  * -------------------------------------------
2317  *
2318  * Matching is supported only for GTP flags E, S, PN.
2319  */
2320 #define MLX5_GTP_FLAGS_MASK     0x07
2321
2322 /**
2323  * Validate GTP item.
2324  *
2325  * @param[in] dev
2326  *   Pointer to the rte_eth_dev structure.
2327  * @param[in] item
2328  *   Item specification.
2329  * @param[in] item_flags
2330  *   Bit-fields that holds the items detected until now.
2331  * @param[out] error
2332  *   Pointer to error structure.
2333  *
2334  * @return
2335  *   0 on success, a negative errno value otherwise and rte_errno is set.
2336  */
2337 static int
2338 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2339                           const struct rte_flow_item *item,
2340                           uint64_t item_flags,
2341                           struct rte_flow_error *error)
2342 {
2343         struct mlx5_priv *priv = dev->data->dev_private;
2344         const struct rte_flow_item_gtp *spec = item->spec;
2345         const struct rte_flow_item_gtp *mask = item->mask;
2346         const struct rte_flow_item_gtp nic_mask = {
2347                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2348                 .msg_type = 0xff,
2349                 .teid = RTE_BE32(0xffffffff),
2350         };
2351
2352         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2353                 return rte_flow_error_set(error, ENOTSUP,
2354                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2355                                           "GTP support is not enabled");
2356         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2357                 return rte_flow_error_set(error, ENOTSUP,
2358                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2359                                           "multiple tunnel layers not"
2360                                           " supported");
2361         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2362                 return rte_flow_error_set(error, EINVAL,
2363                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                                           "no outer UDP layer found");
2365         if (!mask)
2366                 mask = &rte_flow_item_gtp_mask;
2367         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2368                 return rte_flow_error_set(error, ENOTSUP,
2369                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2370                                           "Match is supported for GTP"
2371                                           " flags only");
2372         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2373                                          (const uint8_t *)&nic_mask,
2374                                          sizeof(struct rte_flow_item_gtp),
2375                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2376 }
2377
2378 /**
2379  * Validate GTP PSC item.
2380  *
2381  * @param[in] item
2382  *   Item specification.
2383  * @param[in] last_item
2384  *   Previous validated item in the pattern items.
2385  * @param[in] gtp_item
2386  *   Previous GTP item specification.
2387  * @param[in] attr
2388  *   Pointer to flow attributes.
2389  * @param[out] error
2390  *   Pointer to error structure.
2391  *
2392  * @return
2393  *   0 on success, a negative errno value otherwise and rte_errno is set.
2394  */
static int
flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
			      uint64_t last_item,
			      const struct rte_flow_item *gtp_item,
			      const struct rte_flow_attr *attr,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_gtp *gtp_spec;
	const struct rte_flow_item_gtp *gtp_mask;
	const struct rte_flow_item_gtp_psc *spec;
	const struct rte_flow_item_gtp_psc *mask;
	/* Widest acceptable mask: full PDU type and QFI. */
	const struct rte_flow_item_gtp_psc nic_mask = {
		.pdu_type = 0xFF,
		.qfi = 0xFF,
	};

	/* PSC is a GTP extension header - a GTP item must come right before. */
	if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "GTP PSC item must be preceded with GTP item");
	gtp_spec = gtp_item->spec;
	gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
	/*
	 * Reject when the GTP item explicitly matches the E (extension
	 * header) flag as zero - that contradicts the presence of PSC.
	 */
	if (gtp_spec &&
		(gtp_mask->v_pt_rsv_flags &
		~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "GTP E flag must be 1 to match GTP PSC");
	/* Check the flow is not created in group zero. */
	if (!attr->transfer && !attr->group)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "GTP PSC is not supported for group 0");
	/* No PSC spec - nothing further to validate. */
	if (!item->spec)
		return 0;
	spec = item->spec;
	mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
	if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "PDU type should be smaller than 16");
	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					 (const uint8_t *)&nic_mask,
					 sizeof(struct rte_flow_item_gtp_psc),
					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}
2443
2444 /**
2445  * Validate IPV4 item.
2446  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2447  * add specific validation of fragment_offset field,
2448  *
2449  * @param[in] item
2450  *   Item specification.
2451  * @param[in] item_flags
2452  *   Bit-fields that holds the items detected until now.
2453  * @param[out] error
2454  *   Pointer to error structure.
2455  *
2456  * @return
2457  *   0 on success, a negative errno value otherwise and rte_errno is set.
2458  */
static int
flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
			   uint64_t item_flags,
			   uint64_t last_item,
			   uint16_t ether_type,
			   struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *last = item->last;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	rte_be16_t fragment_offset_spec = 0;
	rte_be16_t fragment_offset_last = 0;
	/* Widest acceptable mask for the fields matchable by this driver. */
	const struct rte_flow_item_ipv4 nic_ipv4_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.fragment_offset = RTE_BE16(0xffff),
			.next_proto_id = 0xff,
			.time_to_live = 0xff,
		},
	};

	/* Generic IPv4 validation first; spec..last ranges are allowed. */
	ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
					   ether_type, &nic_ipv4_mask,
					   MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret < 0)
		return ret;
	if (spec && mask)
		fragment_offset_spec = spec->hdr.fragment_offset &
				       mask->hdr.fragment_offset;
	/* No fragment_offset match requested - nothing more to check. */
	if (!fragment_offset_spec)
		return 0;
	/*
	 * spec and mask are valid, enforce using full mask to make sure the
	 * complete value is used correctly.
	 */
	if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
			!= RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  item, "must use full mask for"
					  " fragment_offset");
	/*
	 * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
	 * indicating this is 1st fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate error message.
	 */
	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on first fragment not "
					  "supported");
	/* Any other exact value needs a range ("last") to be meaningful. */
	if (fragment_offset_spec && !last)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "specified value not supported");
	/* spec and last are valid, validate the specified range. */
	fragment_offset_last = last->hdr.fragment_offset &
			       mask->hdr.fragment_offset;
	/*
	 * Match on fragment_offset spec 0x2001 and last 0x3fff
	 * means MF is 1 and frag-offset is > 0.
	 * This packet is fragment 2nd and onward, excluding last.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
	    fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on following "
					  "fragments not supported");
	/*
	 * Match on fragment_offset spec 0x0001 and last 0x1fff
	 * means MF is 0 and frag-offset is > 0.
	 * This packet is last fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (fragment_offset_spec == RTE_BE16(1) &&
	    fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on last "
					  "fragment not supported");
	/*
	 * Match on fragment_offset spec 0x0001 and last 0x3fff
	 * means MF and/or frag-offset is not 0.
	 * This is a fragmented packet.
	 * Other range values are invalid and rejected.
	 */
	if (!(fragment_offset_spec == RTE_BE16(1) &&
	      fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
					  "specified range not supported");
	return 0;
}
2559
2560 /**
2561  * Validate IPV6 fragment extension item.
2562  *
2563  * @param[in] item
2564  *   Item specification.
2565  * @param[in] item_flags
2566  *   Bit-fields that holds the items detected until now.
2567  * @param[out] error
2568  *   Pointer to error structure.
2569  *
2570  * @return
2571  *   0 on success, a negative errno value otherwise and rte_errno is set.
2572  */
static int
flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
				    uint64_t item_flags,
				    struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
	const struct rte_flow_item_ipv6_frag_ext *last = item->last;
	const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
	rte_be16_t frag_data_spec = 0;
	rte_be16_t frag_data_last = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	/* L4 flag for the current (inner/outer) level. */
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret = 0;
	/* Widest acceptable mask: next header and full frag_data. */
	struct rte_flow_item_ipv6_frag_ext nic_mask = {
		.hdr = {
			.next_header = 0xff,
			.frag_data = RTE_BE16(0xffff),
		},
	};

	/* The fragment extension header sits between L3 and L4. */
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item cannot "
					  "follow L4 item.");
	if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
	    (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item must "
					  "follow ipv6 item");
	if (spec && mask)
		frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
	/* No frag_data match requested - nothing more to check. */
	if (!frag_data_spec)
		return 0;
	/*
	 * spec and mask are valid, enforce using full mask to make sure the
	 * complete value is used correctly.
	 */
	if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
				RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  item, "must use full mask for"
					  " frag_data");
	/*
	 * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
	 * This is 1st fragment of fragmented packet.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on first fragment not "
					  "supported");
	/* Any other exact value needs a range ("last") to be meaningful. */
	if (frag_data_spec && !last)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "specified value not supported");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&nic_mask,
				 sizeof(struct rte_flow_item_ipv6_frag_ext),
				 MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret)
		return ret;
	/* spec and last are valid, validate the specified range. */
	frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
	/*
	 * Match on frag_data spec 0x0009 and last 0xfff9
	 * means M is 1 and frag-offset is > 0.
	 * This packet is fragment 2nd and onward, excluding last.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
				       RTE_IPV6_EHDR_MF_MASK) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on following "
					  "fragments not supported");
	/*
	 * Match on frag_data spec 0x0008 and last 0xfff8
	 * means M is 0 and frag-offset is > 0.
	 * This packet is last fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on last "
					  "fragment not supported");
	/* Other range values are invalid and rejected. */
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
				  "specified range not supported");
}
2673
2674 /*
2675  * Validate ASO CT item.
2676  *
2677  * @param[in] dev
2678  *   Pointer to the rte_eth_dev structure.
2679  * @param[in] item
2680  *   Item specification.
2681  * @param[in] item_flags
2682  *   Pointer to bit-fields that holds the items detected until now.
2683  * @param[out] error
2684  *   Pointer to error structure.
2685  *
2686  * @return
2687  *   0 on success, a negative errno value otherwise and rte_errno is set.
2688  */
2689 static int
2690 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2691                              const struct rte_flow_item *item,
2692                              uint64_t *item_flags,
2693                              struct rte_flow_error *error)
2694 {
2695         const struct rte_flow_item_conntrack *spec = item->spec;
2696         const struct rte_flow_item_conntrack *mask = item->mask;
2697         RTE_SET_USED(dev);
2698         uint32_t flags;
2699
2700         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2701                 return rte_flow_error_set(error, EINVAL,
2702                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2703                                           "Only one CT is supported");
2704         if (!mask)
2705                 mask = &rte_flow_item_conntrack_mask;
2706         flags = spec->flags & mask->flags;
2707         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2708             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2709              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2710              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2711                 return rte_flow_error_set(error, EINVAL,
2712                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2713                                           "Conflict status bits");
2714         /* State change also needs to be considered. */
2715         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2716         return 0;
2717 }
2718
2719 /**
2720  * Validate the pop VLAN action.
2721  *
2722  * @param[in] dev
2723  *   Pointer to the rte_eth_dev structure.
2724  * @param[in] action_flags
2725  *   Holds the actions detected until now.
2726  * @param[in] action
2727  *   Pointer to the pop vlan action.
2728  * @param[in] item_flags
2729  *   The items found in this flow rule.
2730  * @param[in] attr
2731  *   Pointer to flow attributes.
2732  * @param[out] error
2733  *   Pointer to error structure.
2734  *
2735  * @return
2736  *   0 on success, a negative errno value otherwise and rte_errno is set.
2737  */
2738 static int
2739 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2740                                  uint64_t action_flags,
2741                                  const struct rte_flow_action *action,
2742                                  uint64_t item_flags,
2743                                  const struct rte_flow_attr *attr,
2744                                  struct rte_flow_error *error)
2745 {
2746         const struct mlx5_priv *priv = dev->data->dev_private;
2747
2748         (void)action;
2749         (void)attr;
2750         if (!priv->sh->pop_vlan_action)
2751                 return rte_flow_error_set(error, ENOTSUP,
2752                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2753                                           NULL,
2754                                           "pop vlan action is not supported");
2755         if (attr->egress)
2756                 return rte_flow_error_set(error, ENOTSUP,
2757                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2758                                           NULL,
2759                                           "pop vlan action not supported for "
2760                                           "egress");
2761         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2762                 return rte_flow_error_set(error, ENOTSUP,
2763                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2764                                           "no support for multiple VLAN "
2765                                           "actions");
2766         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2767         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2768             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2769                 return rte_flow_error_set(error, ENOTSUP,
2770                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2771                                           NULL,
2772                                           "cannot pop vlan after decap without "
2773                                           "match on inner vlan in the flow");
2774         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2775         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2776             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2777                 return rte_flow_error_set(error, ENOTSUP,
2778                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2779                                           NULL,
2780                                           "cannot pop vlan without a "
2781                                           "match on (outer) vlan in the flow");
2782         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2783                 return rte_flow_error_set(error, EINVAL,
2784                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2785                                           "wrong action order, port_id should "
2786                                           "be after pop VLAN action");
2787         if (!attr->transfer && priv->representor)
2788                 return rte_flow_error_set(error, ENOTSUP,
2789                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2790                                           "pop vlan action for VF representor "
2791                                           "not supported on NIC table");
2792         return 0;
2793 }
2794
2795 /**
2796  * Get VLAN default info from vlan match info.
2797  *
2798  * @param[in] items
2799  *   the list of item specifications.
2800  * @param[out] vlan
2801  *   pointer VLAN info to fill to.
2802  *
2803  * @return
2804  *   0 on success, a negative errno value otherwise and rte_errno is set.
2805  */
2806 static void
2807 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2808                                   struct rte_vlan_hdr *vlan)
2809 {
2810         const struct rte_flow_item_vlan nic_mask = {
2811                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2812                                 MLX5DV_FLOW_VLAN_VID_MASK),
2813                 .inner_type = RTE_BE16(0xffff),
2814         };
2815
2816         if (items == NULL)
2817                 return;
2818         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2819                 int type = items->type;
2820
2821                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2822                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2823                         break;
2824         }
2825         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2826                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2827                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2828
2829                 /* If VLAN item in pattern doesn't contain data, return here. */
2830                 if (!vlan_v)
2831                         return;
2832                 if (!vlan_m)
2833                         vlan_m = &nic_mask;
2834                 /* Only full match values are accepted */
2835                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2836                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2837                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2838                         vlan->vlan_tci |=
2839                                 rte_be_to_cpu_16(vlan_v->tci &
2840                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2841                 }
2842                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2843                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2844                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2845                         vlan->vlan_tci |=
2846                                 rte_be_to_cpu_16(vlan_v->tci &
2847                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2848                 }
2849                 if (vlan_m->inner_type == nic_mask.inner_type)
2850                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2851                                                            vlan_m->inner_type);
2852         }
2853 }
2854
2855 /**
2856  * Validate the push VLAN action.
2857  *
2858  * @param[in] dev
2859  *   Pointer to the rte_eth_dev structure.
2860  * @param[in] action_flags
2861  *   Holds the actions detected until now.
2862  * @param[in] item_flags
2863  *   The items found in this flow rule.
2864  * @param[in] action
2865  *   Pointer to the action structure.
2866  * @param[in] attr
2867  *   Pointer to flow attributes
2868  * @param[out] error
2869  *   Pointer to error structure.
2870  *
2871  * @return
2872  *   0 on success, a negative errno value otherwise and rte_errno is set.
2873  */
2874 static int
2875 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2876                                   uint64_t action_flags,
2877                                   const struct rte_flow_item_vlan *vlan_m,
2878                                   const struct rte_flow_action *action,
2879                                   const struct rte_flow_attr *attr,
2880                                   struct rte_flow_error *error)
2881 {
2882         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2883         const struct mlx5_priv *priv = dev->data->dev_private;
2884
2885         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2886             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2887                 return rte_flow_error_set(error, EINVAL,
2888                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2889                                           "invalid vlan ethertype");
2890         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2891                 return rte_flow_error_set(error, EINVAL,
2892                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2893                                           "wrong action order, port_id should "
2894                                           "be after push VLAN");
2895         if (!attr->transfer && priv->representor)
2896                 return rte_flow_error_set(error, ENOTSUP,
2897                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2898                                           "push vlan action for VF representor "
2899                                           "not supported on NIC table");
2900         if (vlan_m &&
2901             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2902             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2903                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2904             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2905             !(mlx5_flow_find_action
2906                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2907                 return rte_flow_error_set(error, EINVAL,
2908                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2909                                           "not full match mask on VLAN PCP and "
2910                                           "there is no of_set_vlan_pcp action, "
2911                                           "push VLAN action cannot figure out "
2912                                           "PCP value");
2913         if (vlan_m &&
2914             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2915             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2916                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2917             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2918             !(mlx5_flow_find_action
2919                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2920                 return rte_flow_error_set(error, EINVAL,
2921                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2922                                           "not full match mask on VLAN VID and "
2923                                           "there is no of_set_vlan_vid action, "
2924                                           "push VLAN action cannot figure out "
2925                                           "VID value");
2926         (void)attr;
2927         return 0;
2928 }
2929
2930 /**
2931  * Validate the set VLAN PCP.
2932  *
2933  * @param[in] action_flags
2934  *   Holds the actions detected until now.
2935  * @param[in] actions
2936  *   Pointer to the list of actions remaining in the flow rule.
2937  * @param[out] error
2938  *   Pointer to error structure.
2939  *
2940  * @return
2941  *   0 on success, a negative errno value otherwise and rte_errno is set.
2942  */
2943 static int
2944 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2945                                      const struct rte_flow_action actions[],
2946                                      struct rte_flow_error *error)
2947 {
2948         const struct rte_flow_action *action = actions;
2949         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2950
2951         if (conf->vlan_pcp > 7)
2952                 return rte_flow_error_set(error, EINVAL,
2953                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2954                                           "VLAN PCP value is too big");
2955         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2956                 return rte_flow_error_set(error, ENOTSUP,
2957                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2958                                           "set VLAN PCP action must follow "
2959                                           "the push VLAN action");
2960         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2961                 return rte_flow_error_set(error, ENOTSUP,
2962                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2963                                           "Multiple VLAN PCP modification are "
2964                                           "not supported");
2965         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2966                 return rte_flow_error_set(error, EINVAL,
2967                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2968                                           "wrong action order, port_id should "
2969                                           "be after set VLAN PCP");
2970         return 0;
2971 }
2972
2973 /**
2974  * Validate the set VLAN VID.
2975  *
2976  * @param[in] item_flags
2977  *   Holds the items detected in this rule.
2978  * @param[in] action_flags
2979  *   Holds the actions detected until now.
2980  * @param[in] actions
2981  *   Pointer to the list of actions remaining in the flow rule.
2982  * @param[out] error
2983  *   Pointer to error structure.
2984  *
2985  * @return
2986  *   0 on success, a negative errno value otherwise and rte_errno is set.
2987  */
2988 static int
2989 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2990                                      uint64_t action_flags,
2991                                      const struct rte_flow_action actions[],
2992                                      struct rte_flow_error *error)
2993 {
2994         const struct rte_flow_action *action = actions;
2995         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2996
2997         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2998                 return rte_flow_error_set(error, EINVAL,
2999                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3000                                           "VLAN VID value is too big");
3001         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3002             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3003                 return rte_flow_error_set(error, ENOTSUP,
3004                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3005                                           "set VLAN VID action must follow push"
3006                                           " VLAN action or match on VLAN item");
3007         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3008                 return rte_flow_error_set(error, ENOTSUP,
3009                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3010                                           "Multiple VLAN VID modifications are "
3011                                           "not supported");
3012         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3013                 return rte_flow_error_set(error, EINVAL,
3014                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3015                                           "wrong action order, port_id should "
3016                                           "be after set VLAN VID");
3017         return 0;
3018 }
3019
3020 /*
3021  * Validate the FLAG action.
3022  *
3023  * @param[in] dev
3024  *   Pointer to the rte_eth_dev structure.
3025  * @param[in] action_flags
3026  *   Holds the actions detected until now.
3027  * @param[in] attr
3028  *   Pointer to flow attributes
3029  * @param[out] error
3030  *   Pointer to error structure.
3031  *
3032  * @return
3033  *   0 on success, a negative errno value otherwise and rte_errno is set.
3034  */
3035 static int
3036 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3037                              uint64_t action_flags,
3038                              const struct rte_flow_attr *attr,
3039                              struct rte_flow_error *error)
3040 {
3041         struct mlx5_priv *priv = dev->data->dev_private;
3042         struct mlx5_dev_config *config = &priv->config;
3043         int ret;
3044
3045         /* Fall back if no extended metadata register support. */
3046         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3047                 return mlx5_flow_validate_action_flag(action_flags, attr,
3048                                                       error);
3049         /* Extensive metadata mode requires registers. */
3050         if (!mlx5_flow_ext_mreg_supported(dev))
3051                 return rte_flow_error_set(error, ENOTSUP,
3052                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3053                                           "no metadata registers "
3054                                           "to support flag action");
3055         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3056                 return rte_flow_error_set(error, ENOTSUP,
3057                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3058                                           "extended metadata register"
3059                                           " isn't available");
3060         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3061         if (ret < 0)
3062                 return ret;
3063         MLX5_ASSERT(ret > 0);
3064         if (action_flags & MLX5_FLOW_ACTION_MARK)
3065                 return rte_flow_error_set(error, EINVAL,
3066                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3067                                           "can't mark and flag in same flow");
3068         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3069                 return rte_flow_error_set(error, EINVAL,
3070                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3071                                           "can't have 2 flag"
3072                                           " actions in same flow");
3073         return 0;
3074 }
3075
3076 /**
3077  * Validate MARK action.
3078  *
3079  * @param[in] dev
3080  *   Pointer to the rte_eth_dev structure.
3081  * @param[in] action
3082  *   Pointer to action.
3083  * @param[in] action_flags
3084  *   Holds the actions detected until now.
3085  * @param[in] attr
3086  *   Pointer to flow attributes
3087  * @param[out] error
3088  *   Pointer to error structure.
3089  *
3090  * @return
3091  *   0 on success, a negative errno value otherwise and rte_errno is set.
3092  */
3093 static int
3094 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3095                              const struct rte_flow_action *action,
3096                              uint64_t action_flags,
3097                              const struct rte_flow_attr *attr,
3098                              struct rte_flow_error *error)
3099 {
3100         struct mlx5_priv *priv = dev->data->dev_private;
3101         struct mlx5_dev_config *config = &priv->config;
3102         const struct rte_flow_action_mark *mark = action->conf;
3103         int ret;
3104
3105         if (is_tunnel_offload_active(dev))
3106                 return rte_flow_error_set(error, ENOTSUP,
3107                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3108                                           "no mark action "
3109                                           "if tunnel offload active");
3110         /* Fall back if no extended metadata register support. */
3111         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3112                 return mlx5_flow_validate_action_mark(action, action_flags,
3113                                                       attr, error);
3114         /* Extensive metadata mode requires registers. */
3115         if (!mlx5_flow_ext_mreg_supported(dev))
3116                 return rte_flow_error_set(error, ENOTSUP,
3117                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3118                                           "no metadata registers "
3119                                           "to support mark action");
3120         if (!priv->sh->dv_mark_mask)
3121                 return rte_flow_error_set(error, ENOTSUP,
3122                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3123                                           "extended metadata register"
3124                                           " isn't available");
3125         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3126         if (ret < 0)
3127                 return ret;
3128         MLX5_ASSERT(ret > 0);
3129         if (!mark)
3130                 return rte_flow_error_set(error, EINVAL,
3131                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3132                                           "configuration cannot be null");
3133         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3134                 return rte_flow_error_set(error, EINVAL,
3135                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3136                                           &mark->id,
3137                                           "mark id exceeds the limit");
3138         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3139                 return rte_flow_error_set(error, EINVAL,
3140                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3141                                           "can't flag and mark in same flow");
3142         if (action_flags & MLX5_FLOW_ACTION_MARK)
3143                 return rte_flow_error_set(error, EINVAL,
3144                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3145                                           "can't have 2 mark actions in same"
3146                                           " flow");
3147         return 0;
3148 }
3149
3150 /**
3151  * Validate SET_META action.
3152  *
3153  * @param[in] dev
3154  *   Pointer to the rte_eth_dev structure.
3155  * @param[in] action
3156  *   Pointer to the action structure.
3157  * @param[in] action_flags
3158  *   Holds the actions detected until now.
3159  * @param[in] attr
3160  *   Pointer to flow attributes
3161  * @param[out] error
3162  *   Pointer to error structure.
3163  *
3164  * @return
3165  *   0 on success, a negative errno value otherwise and rte_errno is set.
3166  */
3167 static int
3168 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3169                                  const struct rte_flow_action *action,
3170                                  uint64_t action_flags __rte_unused,
3171                                  const struct rte_flow_attr *attr,
3172                                  struct rte_flow_error *error)
3173 {
3174         const struct rte_flow_action_set_meta *conf;
3175         uint32_t nic_mask = UINT32_MAX;
3176         int reg;
3177
3178         if (!mlx5_flow_ext_mreg_supported(dev))
3179                 return rte_flow_error_set(error, ENOTSUP,
3180                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3181                                           "extended metadata register"
3182                                           " isn't supported");
3183         reg = flow_dv_get_metadata_reg(dev, attr, error);
3184         if (reg < 0)
3185                 return reg;
3186         if (reg == REG_NON)
3187                 return rte_flow_error_set(error, ENOTSUP,
3188                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3189                                           "unavalable extended metadata register");
3190         if (reg != REG_A && reg != REG_B) {
3191                 struct mlx5_priv *priv = dev->data->dev_private;
3192
3193                 nic_mask = priv->sh->dv_meta_mask;
3194         }
3195         if (!(action->conf))
3196                 return rte_flow_error_set(error, EINVAL,
3197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3198                                           "configuration cannot be null");
3199         conf = (const struct rte_flow_action_set_meta *)action->conf;
3200         if (!conf->mask)
3201                 return rte_flow_error_set(error, EINVAL,
3202                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3203                                           "zero mask doesn't have any effect");
3204         if (conf->mask & ~nic_mask)
3205                 return rte_flow_error_set(error, EINVAL,
3206                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3207                                           "meta data must be within reg C0");
3208         return 0;
3209 }
3210
3211 /**
3212  * Validate SET_TAG action.
3213  *
3214  * @param[in] dev
3215  *   Pointer to the rte_eth_dev structure.
3216  * @param[in] action
3217  *   Pointer to the action structure.
3218  * @param[in] action_flags
3219  *   Holds the actions detected until now.
3220  * @param[in] attr
3221  *   Pointer to flow attributes
3222  * @param[out] error
3223  *   Pointer to error structure.
3224  *
3225  * @return
3226  *   0 on success, a negative errno value otherwise and rte_errno is set.
3227  */
3228 static int
3229 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3230                                 const struct rte_flow_action *action,
3231                                 uint64_t action_flags,
3232                                 const struct rte_flow_attr *attr,
3233                                 struct rte_flow_error *error)
3234 {
3235         const struct rte_flow_action_set_tag *conf;
3236         const uint64_t terminal_action_flags =
3237                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3238                 MLX5_FLOW_ACTION_RSS;
3239         int ret;
3240
3241         if (!mlx5_flow_ext_mreg_supported(dev))
3242                 return rte_flow_error_set(error, ENOTSUP,
3243                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3244                                           "extensive metadata register"
3245                                           " isn't supported");
3246         if (!(action->conf))
3247                 return rte_flow_error_set(error, EINVAL,
3248                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3249                                           "configuration cannot be null");
3250         conf = (const struct rte_flow_action_set_tag *)action->conf;
3251         if (!conf->mask)
3252                 return rte_flow_error_set(error, EINVAL,
3253                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3254                                           "zero mask doesn't have any effect");
3255         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3256         if (ret < 0)
3257                 return ret;
3258         if (!attr->transfer && attr->ingress &&
3259             (action_flags & terminal_action_flags))
3260                 return rte_flow_error_set(error, EINVAL,
3261                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3262                                           "set_tag has no effect"
3263                                           " with terminal actions");
3264         return 0;
3265 }
3266
3267 /**
3268  * Check if action counter is shared by either old or new mechanism.
3269  *
3270  * @param[in] action
3271  *   Pointer to the action structure.
3272  *
3273  * @return
3274  *   True when counter is shared, false otherwise.
3275  */
3276 static inline bool
3277 is_shared_action_count(const struct rte_flow_action *action)
3278 {
3279         const struct rte_flow_action_count *count =
3280                         (const struct rte_flow_action_count *)action->conf;
3281
3282         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3283                 return true;
3284         return !!(count && count->shared);
3285 }
3286
3287 /**
3288  * Validate count action.
3289  *
3290  * @param[in] dev
3291  *   Pointer to rte_eth_dev structure.
3292  * @param[in] shared
3293  *   Indicator if action is shared.
3294  * @param[in] action_flags
3295  *   Holds the actions detected until now.
3296  * @param[out] error
3297  *   Pointer to error structure.
3298  *
3299  * @return
3300  *   0 on success, a negative errno value otherwise and rte_errno is set.
3301  */
3302 static int
3303 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3304                               uint64_t action_flags,
3305                               struct rte_flow_error *error)
3306 {
3307         struct mlx5_priv *priv = dev->data->dev_private;
3308
3309         if (!priv->config.devx)
3310                 goto notsup_err;
3311         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3312                 return rte_flow_error_set(error, EINVAL,
3313                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3314                                           "duplicate count actions set");
3315         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3316             !priv->sh->flow_hit_aso_en)
3317                 return rte_flow_error_set(error, EINVAL,
3318                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3319                                           "old age and shared count combination is not supported");
3320 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3321         return 0;
3322 #endif
3323 notsup_err:
3324         return rte_flow_error_set
3325                       (error, ENOTSUP,
3326                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3327                        NULL,
3328                        "count action not supported");
3329 }
3330
3331 /**
3332  * Validate the L2 encap action.
3333  *
3334  * @param[in] dev
3335  *   Pointer to the rte_eth_dev structure.
3336  * @param[in] action_flags
3337  *   Holds the actions detected until now.
3338  * @param[in] action
3339  *   Pointer to the action structure.
3340  * @param[in] attr
3341  *   Pointer to flow attributes.
3342  * @param[out] error
3343  *   Pointer to error structure.
3344  *
3345  * @return
3346  *   0 on success, a negative errno value otherwise and rte_errno is set.
3347  */
3348 static int
3349 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3350                                  uint64_t action_flags,
3351                                  const struct rte_flow_action *action,
3352                                  const struct rte_flow_attr *attr,
3353                                  struct rte_flow_error *error)
3354 {
3355         const struct mlx5_priv *priv = dev->data->dev_private;
3356
3357         if (!(action->conf))
3358                 return rte_flow_error_set(error, EINVAL,
3359                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3360                                           "configuration cannot be null");
3361         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3362                 return rte_flow_error_set(error, EINVAL,
3363                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3364                                           "can only have a single encap action "
3365                                           "in a flow");
3366         if (!attr->transfer && priv->representor)
3367                 return rte_flow_error_set(error, ENOTSUP,
3368                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3369                                           "encap action for VF representor "
3370                                           "not supported on NIC table");
3371         return 0;
3372 }
3373
3374 /**
3375  * Validate a decap action.
3376  *
3377  * @param[in] dev
3378  *   Pointer to the rte_eth_dev structure.
3379  * @param[in] action_flags
3380  *   Holds the actions detected until now.
3381  * @param[in] action
3382  *   Pointer to the action structure.
3383  * @param[in] item_flags
3384  *   Holds the items detected.
3385  * @param[in] attr
3386  *   Pointer to flow attributes
3387  * @param[out] error
3388  *   Pointer to error structure.
3389  *
3390  * @return
3391  *   0 on success, a negative errno value otherwise and rte_errno is set.
3392  */
static int
flow_dv_validate_action_decap(struct rte_eth_dev *dev,
                              uint64_t action_flags,
                              const struct rte_flow_action *action,
                              const uint64_t item_flags,
                              const struct rte_flow_attr *attr,
                              struct rte_flow_error *error)
{
        const struct mlx5_priv *priv = dev->data->dev_private;

        /* Decap disabled by configuration on devices with the FCS-scatter limitation. */
        if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
            !priv->config.decap_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "decap is not enabled");
        /* Reject a second decap, or a decap that follows an encap. */
        if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          action_flags &
                                          MLX5_FLOW_ACTION_DECAP ? "can only "
                                          "have a single decap action" : "decap "
                                          "after encap is not supported");
        /* Decap must precede any modify-header action in the list. */
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          NULL,
                                          "decap action not supported for "
                                          "egress");
        /* On a VF representor, decap is only valid on the transfer domain. */
        if (!attr->transfer && priv->representor)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "decap action for VF representor "
                                          "not supported on NIC table");
        /* VXLAN decap requires a VXLAN item in the pattern. */
        if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
            !(item_flags & MLX5_FLOW_LAYER_VXLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "VXLAN item should be present for VXLAN decap");
        return 0;
}
3438
/* Shared zero-length raw decap descriptor (no explicit header data given). */
const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3440
3441 /**
3442  * Validate the raw encap and decap actions.
3443  *
3444  * @param[in] dev
3445  *   Pointer to the rte_eth_dev structure.
3446  * @param[in] decap
3447  *   Pointer to the decap action.
3448  * @param[in] encap
3449  *   Pointer to the encap action.
3450  * @param[in] attr
3451  *   Pointer to flow attributes
3452  * @param[in/out] action_flags
3453  *   Holds the actions detected until now.
3454  * @param[out] actions_n
3455  *   pointer to the number of actions counter.
3456  * @param[in] action
3457  *   Pointer to the action structure.
3458  * @param[in] item_flags
3459  *   Holds the items detected.
3460  * @param[out] error
3461  *   Pointer to error structure.
3462  *
3463  * @return
3464  *   0 on success, a negative errno value otherwise and rte_errno is set.
3465  */
static int
flow_dv_validate_action_raw_encap_decap
        (struct rte_eth_dev *dev,
         const struct rte_flow_action_raw_decap *decap,
         const struct rte_flow_action_raw_encap *encap,
         const struct rte_flow_attr *attr, uint64_t *action_flags,
         int *actions_n, const struct rte_flow_action *action,
         uint64_t item_flags, struct rte_flow_error *error)
{
        const struct mlx5_priv *priv = dev->data->dev_private;
        int ret;

        if (encap && (!encap->size || !encap->data))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "raw encap data cannot be empty");
        if (decap && encap) {
                /*
                 * Classify the decap+encap pair by comparing each size to
                 * MLX5_ENCAPSULATION_DECISION_SIZE: a side at or below the
                 * threshold is folded into the other as an L3 reformat by
                 * dropping its pointer; both above means two L2 actions.
                 */
                if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
                    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
                        /* L3 encap. */
                        decap = NULL;
                else if (encap->size <=
                           MLX5_ENCAPSULATION_DECISION_SIZE &&
                           decap->size >
                           MLX5_ENCAPSULATION_DECISION_SIZE)
                        /* L3 decap. */
                        encap = NULL;
                else if (encap->size >
                           MLX5_ENCAPSULATION_DECISION_SIZE &&
                           decap->size >
                           MLX5_ENCAPSULATION_DECISION_SIZE)
                        /* 2 L2 actions: encap and decap. */
                        ;
                else
                        return rte_flow_error_set(error,
                                ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "unsupported too small "
                                "raw decap and too small raw "
                                "encap combination");
        }
        if (decap) {
                /* Delegate to the generic decap checks, then account it. */
                ret = flow_dv_validate_action_decap(dev, *action_flags, action,
                                                    item_flags, attr, error);
                if (ret < 0)
                        return ret;
                *action_flags |= MLX5_FLOW_ACTION_DECAP;
                ++(*actions_n);
        }
        if (encap) {
                if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "small raw encap size");
                if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "more than one encap action");
                /* On a VF representor, encap is only valid on the transfer domain. */
                if (!attr->transfer && priv->representor)
                        return rte_flow_error_set
                                        (error, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "encap action for VF representor "
                                         "not supported on NIC table");
                *action_flags |= MLX5_FLOW_ACTION_ENCAP;
                ++(*actions_n);
        }
        return 0;
}
3537
/**
3539  * Validate the ASO CT action.
3540  *
3541  * @param[in] dev
3542  *   Pointer to the rte_eth_dev structure.
3543  * @param[in] action_flags
3544  *   Holds the actions detected until now.
3545  * @param[in] item_flags
3546  *   The items found in this flow rule.
3547  * @param[in] attr
3548  *   Pointer to flow attributes.
3549  * @param[out] error
3550  *   Pointer to error structure.
3551  *
3552  * @return
3553  *   0 on success, a negative errno value otherwise and rte_errno is set.
3554  */
static int
flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
                               uint64_t action_flags,
                               uint64_t item_flags,
                               const struct rte_flow_attr *attr,
                               struct rte_flow_error *error)
{
        RTE_SET_USED(dev);

        /* CT is only allowed on non-root tables (group > 0) or transfer. */
        if (attr->group == 0 && !attr->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "Only support non-root table");
        if (action_flags & MLX5_FLOW_FATE_ACTIONS)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "CT cannot follow a fate action");
        /* Meter and age are ASO-backed too; only one ASO action per flow. */
        if ((action_flags & MLX5_FLOW_ACTION_METER) ||
            (action_flags & MLX5_FLOW_ACTION_AGE))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "Only one ASO action is supported");
        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "Encap cannot exist before CT");
        /* Connection tracking requires an outer TCP layer in the pattern. */
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "Not a outer TCP packet");
        return 0;
}
3588
3589 /**
3590  * Match encap_decap resource.
3591  *
3592  * @param list
3593  *   Pointer to the hash list.
3594  * @param entry
3595  *   Pointer to exist resource entry object.
3596  * @param key
3597  *   Key of the new entry.
 * @param cb_ctx
 *   Pointer to new encap_decap resource.
3600  *
3601  * @return
 *   0 on matching, non-zero otherwise.
3603  */
3604 int
3605 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3606                              struct mlx5_hlist_entry *entry,
3607                              uint64_t key __rte_unused, void *cb_ctx)
3608 {
3609         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3610         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3611         struct mlx5_flow_dv_encap_decap_resource *resource;
3612
3613         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3614                                 entry);
3615         if (resource->reformat_type == ctx_resource->reformat_type &&
3616             resource->ft_type == ctx_resource->ft_type &&
3617             resource->flags == ctx_resource->flags &&
3618             resource->size == ctx_resource->size &&
3619             !memcmp((const void *)resource->buf,
3620                     (const void *)ctx_resource->buf,
3621                     resource->size))
3622                 return 0;
3623         return -1;
3624 }
3625
3626 /**
3627  * Allocate encap_decap resource.
3628  *
 * @param list
 *   Pointer to the hash list.
 * @param key
 *   Key of the new entry (unused).
 * @param cb_ctx
 *   Pointer to new encap_decap resource.
 *
 * @return
 *   Pointer to the new entry on success, NULL otherwise and rte_errno is set.
3638  */
struct mlx5_hlist_entry *
flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
                              uint64_t key __rte_unused,
                              void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5dv_dr_domain *domain;
        struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
        struct mlx5_flow_dv_encap_decap_resource *resource;
        uint32_t idx;
        int ret;

        /* Select the DR domain matching the resource's flow table type. */
        if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;
        /* Register new encap/decap resource. */
        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
        if (!resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate resource memory");
                return NULL;
        }
        /* Copy the template and keep the ipool index for later release. */
        *resource = *ctx_resource;
        resource->idx = idx;
        ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
                                                              resource,
                                                             &resource->action);
        if (ret) {
                /* Roll back the ipool allocation on action creation failure. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "cannot create action");
                return NULL;
        }

        return &resource->entry;
}
3681
3682 /**
3683  * Find existing encap/decap resource or create and register a new one.
3684  *
3685  * @param[in, out] dev
3686  *   Pointer to rte_eth_dev structure.
3687  * @param[in, out] resource
3688  *   Pointer to encap/decap resource.
3689  * @parm[in, out] dev_flow
3690  *   Pointer to the dev_flow.
3691  * @param[out] error
3692  *   pointer to error structure.
3693  *
3694  * @return
3695  *   0 on success otherwise -errno and errno is set.
3696  */
static int
flow_dv_encap_decap_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_encap_decap_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_hlist_entry *entry;
        union {
                struct {
                        uint32_t ft_type:8;
                        uint32_t refmt_type:8;
                        /*
                         * Header reformat actions can be shared between
                         * non-root tables. One bit to indicate non-root
                         * table or not.
                         */
                        uint32_t is_root:1;
                        uint32_t reserve:15;
                };
                uint32_t v32;
        } encap_decap_key = {
                {
                        .ft_type = resource->ft_type,
                        .refmt_type = resource->reformat_type,
                        /*
                         * NOTE(review): taken from the group number, so the
                         * bit is set for non-root tables despite the
                         * "is_root" field name - confirm intended polarity.
                         */
                        .is_root = !!dev_flow->dv.group,
                        .reserve = 0,
                }
        };
        struct mlx5_flow_cb_ctx ctx = {
                .error = error,
                .data = resource,
        };
        uint64_t key64;

        /* Bit 0 of flags marks a root-table (group 0) resource. */
        resource->flags = dev_flow->dv.group ? 0 : 1;
        /* Hash the type key first, then fold in the reformat buffer. */
        key64 =  __rte_raw_cksum(&encap_decap_key.v32,
                                 sizeof(encap_decap_key.v32), 0);
        if (resource->reformat_type !=
            MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
            resource->size)
                key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
        /* Reuse a matching entry or create one via the hlist callbacks. */
        entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
        if (!entry)
                return -rte_errno;
        resource = container_of(entry, typeof(*resource), entry);
        dev_flow->dv.encap_decap = resource;
        dev_flow->handle->dvh.rix_encap_decap = resource->idx;
        return 0;
}
3749
3750 /**
3751  * Find existing table jump resource or create and register a new one.
3752  *
3753  * @param[in, out] dev
3754  *   Pointer to rte_eth_dev structure.
3755  * @param[in, out] tbl
3756  *   Pointer to flow table resource.
3757  * @parm[in, out] dev_flow
3758  *   Pointer to the dev_flow.
3759  * @param[out] error
3760  *   pointer to error structure.
3761  *
3762  * @return
3763  *   0 on success otherwise -errno and errno is set.
3764  */
3765 static int
3766 flow_dv_jump_tbl_resource_register
3767                         (struct rte_eth_dev *dev __rte_unused,
3768                          struct mlx5_flow_tbl_resource *tbl,
3769                          struct mlx5_flow *dev_flow,
3770                          struct rte_flow_error *error __rte_unused)
3771 {
3772         struct mlx5_flow_tbl_data_entry *tbl_data =
3773                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3774
3775         MLX5_ASSERT(tbl);
3776         MLX5_ASSERT(tbl_data->jump.action);
3777         dev_flow->handle->rix_jump = tbl_data->idx;
3778         dev_flow->dv.jump = &tbl_data->jump;
3779         return 0;
3780 }
3781
3782 int
3783 flow_dv_port_id_match_cb(struct mlx5_list *list __rte_unused,
3784                          struct mlx5_list_entry *entry, void *cb_ctx)
3785 {
3786         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3787         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3788         struct mlx5_flow_dv_port_id_action_resource *res =
3789                         container_of(entry, typeof(*res), entry);
3790
3791         return ref->port_id != res->port_id;
3792 }
3793
/* List callback: allocate a port_id action resource and its DR action. */
struct mlx5_list_entry *
flow_dv_port_id_create_cb(struct mlx5_list *list,
                          struct mlx5_list_entry *entry __rte_unused,
                          void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_port_id_action_resource *resource;
        uint32_t idx;
        int ret;

        /* Register new port id action resource. */
        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
        if (!resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate port_id action memory");
                return NULL;
        }
        *resource = *ref;
        /* dest-port actions only exist on the FDB (transfer) domain. */
        ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
                                                        ref->port_id,
                                                        &resource->action);
        if (ret) {
                /* Roll back the ipool allocation on action creation failure. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create action");
                return NULL;
        }
        resource->idx = idx;
        return &resource->entry;
}
3828
3829 struct mlx5_list_entry *
3830 flow_dv_port_id_clone_cb(struct mlx5_list *list,
3831                           struct mlx5_list_entry *entry __rte_unused,
3832                           void *cb_ctx)
3833 {
3834         struct mlx5_dev_ctx_shared *sh = list->ctx;
3835         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3836         struct mlx5_flow_dv_port_id_action_resource *resource;
3837         uint32_t idx;
3838
3839         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3840         if (!resource) {
3841                 rte_flow_error_set(ctx->error, ENOMEM,
3842                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3843                                    "cannot allocate port_id action memory");
3844                 return NULL;
3845         }
3846         memcpy(resource, entry, sizeof(*resource));
3847         resource->idx = idx;
3848         return &resource->entry;
3849 }
3850
3851 void
3852 flow_dv_port_id_clone_free_cb(struct mlx5_list *list,
3853                           struct mlx5_list_entry *entry)
3854 {
3855         struct mlx5_dev_ctx_shared *sh = list->ctx;
3856         struct mlx5_flow_dv_port_id_action_resource *resource =
3857                         container_of(entry, typeof(*resource), entry);
3858
3859         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3860 }
3861
3862 /**
3863  * Find existing table port ID resource or create and register a new one.
3864  *
3865  * @param[in, out] dev
3866  *   Pointer to rte_eth_dev structure.
3867  * @param[in, out] ref
3868  *   Pointer to port ID action resource reference.
3869  * @parm[in, out] dev_flow
3870  *   Pointer to the dev_flow.
3871  * @param[out] error
3872  *   pointer to error structure.
3873  *
3874  * @return
3875  *   0 on success otherwise -errno and errno is set.
3876  */
3877 static int
3878 flow_dv_port_id_action_resource_register
3879                         (struct rte_eth_dev *dev,
3880                          struct mlx5_flow_dv_port_id_action_resource *ref,
3881                          struct mlx5_flow *dev_flow,
3882                          struct rte_flow_error *error)
3883 {
3884         struct mlx5_priv *priv = dev->data->dev_private;
3885         struct mlx5_list_entry *entry;
3886         struct mlx5_flow_dv_port_id_action_resource *resource;
3887         struct mlx5_flow_cb_ctx ctx = {
3888                 .error = error,
3889                 .data = ref,
3890         };
3891
3892         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3893         if (!entry)
3894                 return -rte_errno;
3895         resource = container_of(entry, typeof(*resource), entry);
3896         dev_flow->dv.port_id_action = resource;
3897         dev_flow->handle->rix_port_id_action = resource->idx;
3898         return 0;
3899 }
3900
3901 int
3902 flow_dv_push_vlan_match_cb(struct mlx5_list *list __rte_unused,
3903                          struct mlx5_list_entry *entry, void *cb_ctx)
3904 {
3905         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3906         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3907         struct mlx5_flow_dv_push_vlan_action_resource *res =
3908                         container_of(entry, typeof(*res), entry);
3909
3910         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3911 }
3912
/* List callback: allocate a push-VLAN action resource and its DR action. */
struct mlx5_list_entry *
flow_dv_push_vlan_create_cb(struct mlx5_list *list,
                          struct mlx5_list_entry *entry __rte_unused,
                          void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_push_vlan_action_resource *resource;
        struct mlx5dv_dr_domain *domain;
        uint32_t idx;
        int ret;

        /* Register new push_vlan action resource. */
        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
        if (!resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate push_vlan action memory");
                return NULL;
        }
        *resource = *ref;
        /* Select the DR domain matching the flow table type. */
        if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;
        ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
                                                        &resource->action);
        if (ret) {
                /* Roll back the ipool allocation on action creation failure. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create push vlan action");
                return NULL;
        }
        resource->idx = idx;
        return &resource->entry;
}
3953
3954 struct mlx5_list_entry *
3955 flow_dv_push_vlan_clone_cb(struct mlx5_list *list,
3956                           struct mlx5_list_entry *entry __rte_unused,
3957                           void *cb_ctx)
3958 {
3959         struct mlx5_dev_ctx_shared *sh = list->ctx;
3960         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3961         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3962         uint32_t idx;
3963
3964         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3965         if (!resource) {
3966                 rte_flow_error_set(ctx->error, ENOMEM,
3967                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3968                                    "cannot allocate push_vlan action memory");
3969                 return NULL;
3970         }
3971         memcpy(resource, entry, sizeof(*resource));
3972         resource->idx = idx;
3973         return &resource->entry;
3974 }
3975
3976 void
3977 flow_dv_push_vlan_clone_free_cb(struct mlx5_list *list,
3978                             struct mlx5_list_entry *entry)
3979 {
3980         struct mlx5_dev_ctx_shared *sh = list->ctx;
3981         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3982                         container_of(entry, typeof(*resource), entry);
3983
3984         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3985 }
3986
3987 /**
3988  * Find existing push vlan resource or create and register a new one.
3989  *
3990  * @param [in, out] dev
3991  *   Pointer to rte_eth_dev structure.
3992  * @param[in, out] ref
3993  *   Pointer to port ID action resource reference.
3994  * @parm[in, out] dev_flow
3995  *   Pointer to the dev_flow.
3996  * @param[out] error
3997  *   pointer to error structure.
3998  *
3999  * @return
4000  *   0 on success otherwise -errno and errno is set.
4001  */
4002 static int
4003 flow_dv_push_vlan_action_resource_register
4004                        (struct rte_eth_dev *dev,
4005                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
4006                         struct mlx5_flow *dev_flow,
4007                         struct rte_flow_error *error)
4008 {
4009         struct mlx5_priv *priv = dev->data->dev_private;
4010         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4011         struct mlx5_list_entry *entry;
4012         struct mlx5_flow_cb_ctx ctx = {
4013                 .error = error,
4014                 .data = ref,
4015         };
4016
4017         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4018         if (!entry)
4019                 return -rte_errno;
4020         resource = container_of(entry, typeof(*resource), entry);
4021
4022         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4023         dev_flow->dv.push_vlan_res = resource;
4024         return 0;
4025 }
4026
4027 /**
4028  * Get the size of specific rte_flow_item_type hdr size
4029  *
4030  * @param[in] item_type
4031  *   Tested rte_flow_item_type.
4032  *
4033  * @return
4034  *   sizeof struct item_type, 0 if void or irrelevant.
4035  */
4036 static size_t
4037 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4038 {
4039         size_t retval;
4040
4041         switch (item_type) {
4042         case RTE_FLOW_ITEM_TYPE_ETH:
4043                 retval = sizeof(struct rte_ether_hdr);
4044                 break;
4045         case RTE_FLOW_ITEM_TYPE_VLAN:
4046                 retval = sizeof(struct rte_vlan_hdr);
4047                 break;
4048         case RTE_FLOW_ITEM_TYPE_IPV4:
4049                 retval = sizeof(struct rte_ipv4_hdr);
4050                 break;
4051         case RTE_FLOW_ITEM_TYPE_IPV6:
4052                 retval = sizeof(struct rte_ipv6_hdr);
4053                 break;
4054         case RTE_FLOW_ITEM_TYPE_UDP:
4055                 retval = sizeof(struct rte_udp_hdr);
4056                 break;
4057         case RTE_FLOW_ITEM_TYPE_TCP:
4058                 retval = sizeof(struct rte_tcp_hdr);
4059                 break;
4060         case RTE_FLOW_ITEM_TYPE_VXLAN:
4061         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4062                 retval = sizeof(struct rte_vxlan_hdr);
4063                 break;
4064         case RTE_FLOW_ITEM_TYPE_GRE:
4065         case RTE_FLOW_ITEM_TYPE_NVGRE:
4066                 retval = sizeof(struct rte_gre_hdr);
4067                 break;
4068         case RTE_FLOW_ITEM_TYPE_MPLS:
4069                 retval = sizeof(struct rte_mpls_hdr);
4070                 break;
4071         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4072         default:
4073                 retval = 0;
4074                 break;
4075         }
4076         return retval;
4077 }
4078
/* Default header field values used when building encapsulation headers. */
#define MLX5_ENCAP_IPV4_VERSION         0x40 /* IPv4: version 4 in high nibble. */
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05 /* Minimal IHL: 5 32-bit words. */
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40 /* Default TTL of 64. */
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000 /* IPv6: version 6 field. */
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff /* Default hop limit of 255. */
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000 /* VXLAN VNI-valid (I) flag. */
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04 /* VXLAN-GPE flags byte. */
4086
4087 /**
4088  * Convert the encap action data from list of rte_flow_item to raw buffer
4089  *
4090  * @param[in] items
4091  *   Pointer to rte_flow_item objects list.
4092  * @param[out] buf
4093  *   Pointer to the output buffer.
4094  * @param[out] size
4095  *   Pointer to the output buffer size.
4096  * @param[out] error
4097  *   Pointer to the error structure.
4098  *
4099  * @return
4100  *   0 on success, a negative errno value otherwise and rte_errno is set.
4101  */
4102 static int
4103 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4104                            size_t *size, struct rte_flow_error *error)
4105 {
4106         struct rte_ether_hdr *eth = NULL;
4107         struct rte_vlan_hdr *vlan = NULL;
4108         struct rte_ipv4_hdr *ipv4 = NULL;
4109         struct rte_ipv6_hdr *ipv6 = NULL;
4110         struct rte_udp_hdr *udp = NULL;
4111         struct rte_vxlan_hdr *vxlan = NULL;
4112         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4113         struct rte_gre_hdr *gre = NULL;
4114         size_t len;
4115         size_t temp_size = 0;
4116
4117         if (!items)
4118                 return rte_flow_error_set(error, EINVAL,
4119                                           RTE_FLOW_ERROR_TYPE_ACTION,
4120                                           NULL, "invalid empty data");
4121         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4122                 len = flow_dv_get_item_hdr_len(items->type);
4123                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4124                         return rte_flow_error_set(error, EINVAL,
4125                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4126                                                   (void *)items->type,
4127                                                   "items total size is too big"
4128                                                   " for encap action");
4129                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4130                 switch (items->type) {
4131                 case RTE_FLOW_ITEM_TYPE_ETH:
4132                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4133                         break;
4134                 case RTE_FLOW_ITEM_TYPE_VLAN:
4135                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4136                         if (!eth)
4137                                 return rte_flow_error_set(error, EINVAL,
4138                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4139                                                 (void *)items->type,
4140                                                 "eth header not found");
4141                         if (!eth->ether_type)
4142                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4143                         break;
4144                 case RTE_FLOW_ITEM_TYPE_IPV4:
4145                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4146                         if (!vlan && !eth)
4147                                 return rte_flow_error_set(error, EINVAL,
4148                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4149                                                 (void *)items->type,
4150                                                 "neither eth nor vlan"
4151                                                 " header found");
4152                         if (vlan && !vlan->eth_proto)
4153                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4154                         else if (eth && !eth->ether_type)
4155                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4156                         if (!ipv4->version_ihl)
4157                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4158                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4159                         if (!ipv4->time_to_live)
4160                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4161                         break;
4162                 case RTE_FLOW_ITEM_TYPE_IPV6:
4163                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4164                         if (!vlan && !eth)
4165                                 return rte_flow_error_set(error, EINVAL,
4166                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4167                                                 (void *)items->type,
4168                                                 "neither eth nor vlan"
4169                                                 " header found");
4170                         if (vlan && !vlan->eth_proto)
4171                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4172                         else if (eth && !eth->ether_type)
4173                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4174                         if (!ipv6->vtc_flow)
4175                                 ipv6->vtc_flow =
4176                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4177                         if (!ipv6->hop_limits)
4178                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4179                         break;
4180                 case RTE_FLOW_ITEM_TYPE_UDP:
4181                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4182                         if (!ipv4 && !ipv6)
4183                                 return rte_flow_error_set(error, EINVAL,
4184                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4185                                                 (void *)items->type,
4186                                                 "ip header not found");
4187                         if (ipv4 && !ipv4->next_proto_id)
4188                                 ipv4->next_proto_id = IPPROTO_UDP;
4189                         else if (ipv6 && !ipv6->proto)
4190                                 ipv6->proto = IPPROTO_UDP;
4191                         break;
4192                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4193                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4194                         if (!udp)
4195                                 return rte_flow_error_set(error, EINVAL,
4196                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4197                                                 (void *)items->type,
4198                                                 "udp header not found");
4199                         if (!udp->dst_port)
4200                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4201                         if (!vxlan->vx_flags)
4202                                 vxlan->vx_flags =
4203                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4204                         break;
4205                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4206                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4207                         if (!udp)
4208                                 return rte_flow_error_set(error, EINVAL,
4209                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4210                                                 (void *)items->type,
4211                                                 "udp header not found");
4212                         if (!vxlan_gpe->proto)
4213                                 return rte_flow_error_set(error, EINVAL,
4214                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4215                                                 (void *)items->type,
4216                                                 "next protocol not found");
4217                         if (!udp->dst_port)
4218                                 udp->dst_port =
4219                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4220                         if (!vxlan_gpe->vx_flags)
4221                                 vxlan_gpe->vx_flags =
4222                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4223                         break;
4224                 case RTE_FLOW_ITEM_TYPE_GRE:
4225                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4226                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4227                         if (!gre->proto)
4228                                 return rte_flow_error_set(error, EINVAL,
4229                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4230                                                 (void *)items->type,
4231                                                 "next protocol not found");
4232                         if (!ipv4 && !ipv6)
4233                                 return rte_flow_error_set(error, EINVAL,
4234                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4235                                                 (void *)items->type,
4236                                                 "ip header not found");
4237                         if (ipv4 && !ipv4->next_proto_id)
4238                                 ipv4->next_proto_id = IPPROTO_GRE;
4239                         else if (ipv6 && !ipv6->proto)
4240                                 ipv6->proto = IPPROTO_GRE;
4241                         break;
4242                 case RTE_FLOW_ITEM_TYPE_VOID:
4243                         break;
4244                 default:
4245                         return rte_flow_error_set(error, EINVAL,
4246                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4247                                                   (void *)items->type,
4248                                                   "unsupported item type");
4249                         break;
4250                 }
4251                 temp_size += len;
4252         }
4253         *size = temp_size;
4254         return 0;
4255 }
4256
4257 static int
4258 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4259 {
4260         struct rte_ether_hdr *eth = NULL;
4261         struct rte_vlan_hdr *vlan = NULL;
4262         struct rte_ipv6_hdr *ipv6 = NULL;
4263         struct rte_udp_hdr *udp = NULL;
4264         char *next_hdr;
4265         uint16_t proto;
4266
4267         eth = (struct rte_ether_hdr *)data;
4268         next_hdr = (char *)(eth + 1);
4269         proto = RTE_BE16(eth->ether_type);
4270
4271         /* VLAN skipping */
4272         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4273                 vlan = (struct rte_vlan_hdr *)next_hdr;
4274                 proto = RTE_BE16(vlan->eth_proto);
4275                 next_hdr += sizeof(struct rte_vlan_hdr);
4276         }
4277
4278         /* HW calculates IPv4 csum. no need to proceed */
4279         if (proto == RTE_ETHER_TYPE_IPV4)
4280                 return 0;
4281
4282         /* non IPv4/IPv6 header. not supported */
4283         if (proto != RTE_ETHER_TYPE_IPV6) {
4284                 return rte_flow_error_set(error, ENOTSUP,
4285                                           RTE_FLOW_ERROR_TYPE_ACTION,
4286                                           NULL, "Cannot offload non IPv4/IPv6");
4287         }
4288
4289         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4290
4291         /* ignore non UDP */
4292         if (ipv6->proto != IPPROTO_UDP)
4293                 return 0;
4294
4295         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4296         udp->dgram_cksum = 0;
4297
4298         return 0;
4299 }
4300
4301 /**
4302  * Convert L2 encap action to DV specification.
4303  *
4304  * @param[in] dev
4305  *   Pointer to rte_eth_dev structure.
4306  * @param[in] action
4307  *   Pointer to action structure.
4308  * @param[in, out] dev_flow
4309  *   Pointer to the mlx5_flow.
4310  * @param[in] transfer
4311  *   Mark if the flow is E-Switch flow.
4312  * @param[out] error
4313  *   Pointer to the error structure.
4314  *
4315  * @return
4316  *   0 on success, a negative errno value otherwise and rte_errno is set.
4317  */
4318 static int
4319 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4320                                const struct rte_flow_action *action,
4321                                struct mlx5_flow *dev_flow,
4322                                uint8_t transfer,
4323                                struct rte_flow_error *error)
4324 {
4325         const struct rte_flow_item *encap_data;
4326         const struct rte_flow_action_raw_encap *raw_encap_data;
4327         struct mlx5_flow_dv_encap_decap_resource res = {
4328                 .reformat_type =
4329                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4330                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4331                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4332         };
4333
4334         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4335                 raw_encap_data =
4336                         (const struct rte_flow_action_raw_encap *)action->conf;
4337                 res.size = raw_encap_data->size;
4338                 memcpy(res.buf, raw_encap_data->data, res.size);
4339         } else {
4340                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4341                         encap_data =
4342                                 ((const struct rte_flow_action_vxlan_encap *)
4343                                                 action->conf)->definition;
4344                 else
4345                         encap_data =
4346                                 ((const struct rte_flow_action_nvgre_encap *)
4347                                                 action->conf)->definition;
4348                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4349                                                &res.size, error))
4350                         return -rte_errno;
4351         }
4352         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4353                 return -rte_errno;
4354         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4355                 return rte_flow_error_set(error, EINVAL,
4356                                           RTE_FLOW_ERROR_TYPE_ACTION,
4357                                           NULL, "can't create L2 encap action");
4358         return 0;
4359 }
4360
4361 /**
4362  * Convert L2 decap action to DV specification.
4363  *
4364  * @param[in] dev
4365  *   Pointer to rte_eth_dev structure.
4366  * @param[in, out] dev_flow
4367  *   Pointer to the mlx5_flow.
4368  * @param[in] transfer
4369  *   Mark if the flow is E-Switch flow.
4370  * @param[out] error
4371  *   Pointer to the error structure.
4372  *
4373  * @return
4374  *   0 on success, a negative errno value otherwise and rte_errno is set.
4375  */
4376 static int
4377 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4378                                struct mlx5_flow *dev_flow,
4379                                uint8_t transfer,
4380                                struct rte_flow_error *error)
4381 {
4382         struct mlx5_flow_dv_encap_decap_resource res = {
4383                 .size = 0,
4384                 .reformat_type =
4385                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4386                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4387                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4388         };
4389
4390         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4391                 return rte_flow_error_set(error, EINVAL,
4392                                           RTE_FLOW_ERROR_TYPE_ACTION,
4393                                           NULL, "can't create L2 decap action");
4394         return 0;
4395 }
4396
4397 /**
4398  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4399  *
4400  * @param[in] dev
4401  *   Pointer to rte_eth_dev structure.
4402  * @param[in] action
4403  *   Pointer to action structure.
4404  * @param[in, out] dev_flow
4405  *   Pointer to the mlx5_flow.
4406  * @param[in] attr
4407  *   Pointer to the flow attributes.
4408  * @param[out] error
4409  *   Pointer to the error structure.
4410  *
4411  * @return
4412  *   0 on success, a negative errno value otherwise and rte_errno is set.
4413  */
4414 static int
4415 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4416                                 const struct rte_flow_action *action,
4417                                 struct mlx5_flow *dev_flow,
4418                                 const struct rte_flow_attr *attr,
4419                                 struct rte_flow_error *error)
4420 {
4421         const struct rte_flow_action_raw_encap *encap_data;
4422         struct mlx5_flow_dv_encap_decap_resource res;
4423
4424         memset(&res, 0, sizeof(res));
4425         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4426         res.size = encap_data->size;
4427         memcpy(res.buf, encap_data->data, res.size);
4428         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4429                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4430                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4431         if (attr->transfer)
4432                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4433         else
4434                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4435                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4436         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4437                 return rte_flow_error_set(error, EINVAL,
4438                                           RTE_FLOW_ERROR_TYPE_ACTION,
4439                                           NULL, "can't create encap action");
4440         return 0;
4441 }
4442
4443 /**
4444  * Create action push VLAN.
4445  *
4446  * @param[in] dev
4447  *   Pointer to rte_eth_dev structure.
4448  * @param[in] attr
4449  *   Pointer to the flow attributes.
4450  * @param[in] vlan
4451  *   Pointer to the vlan to push to the Ethernet header.
4452  * @param[in, out] dev_flow
4453  *   Pointer to the mlx5_flow.
4454  * @param[out] error
4455  *   Pointer to the error structure.
4456  *
4457  * @return
4458  *   0 on success, a negative errno value otherwise and rte_errno is set.
4459  */
4460 static int
4461 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4462                                 const struct rte_flow_attr *attr,
4463                                 const struct rte_vlan_hdr *vlan,
4464                                 struct mlx5_flow *dev_flow,
4465                                 struct rte_flow_error *error)
4466 {
4467         struct mlx5_flow_dv_push_vlan_action_resource res;
4468
4469         memset(&res, 0, sizeof(res));
4470         res.vlan_tag =
4471                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4472                                  vlan->vlan_tci);
4473         if (attr->transfer)
4474                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4475         else
4476                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4477                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4478         return flow_dv_push_vlan_action_resource_register
4479                                             (dev, &res, dev_flow, error);
4480 }
4481
4482 /**
4483  * Validate the modify-header actions.
4484  *
4485  * @param[in] action_flags
4486  *   Holds the actions detected until now.
4487  * @param[in] action
4488  *   Pointer to the modify action.
4489  * @param[out] error
4490  *   Pointer to error structure.
4491  *
4492  * @return
4493  *   0 on success, a negative errno value otherwise and rte_errno is set.
4494  */
4495 static int
4496 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4497                                    const struct rte_flow_action *action,
4498                                    struct rte_flow_error *error)
4499 {
4500         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4501                 return rte_flow_error_set(error, EINVAL,
4502                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4503                                           NULL, "action configuration not set");
4504         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4505                 return rte_flow_error_set(error, EINVAL,
4506                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4507                                           "can't have encap action before"
4508                                           " modify action");
4509         return 0;
4510 }
4511
4512 /**
4513  * Validate the modify-header MAC address actions.
4514  *
4515  * @param[in] action_flags
4516  *   Holds the actions detected until now.
4517  * @param[in] action
4518  *   Pointer to the modify action.
4519  * @param[in] item_flags
4520  *   Holds the items detected.
4521  * @param[out] error
4522  *   Pointer to error structure.
4523  *
4524  * @return
4525  *   0 on success, a negative errno value otherwise and rte_errno is set.
4526  */
4527 static int
4528 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4529                                    const struct rte_flow_action *action,
4530                                    const uint64_t item_flags,
4531                                    struct rte_flow_error *error)
4532 {
4533         int ret = 0;
4534
4535         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4536         if (!ret) {
4537                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4538                         return rte_flow_error_set(error, EINVAL,
4539                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4540                                                   NULL,
4541                                                   "no L2 item in pattern");
4542         }
4543         return ret;
4544 }
4545
4546 /**
4547  * Validate the modify-header IPv4 address actions.
4548  *
4549  * @param[in] action_flags
4550  *   Holds the actions detected until now.
4551  * @param[in] action
4552  *   Pointer to the modify action.
4553  * @param[in] item_flags
4554  *   Holds the items detected.
4555  * @param[out] error
4556  *   Pointer to error structure.
4557  *
4558  * @return
4559  *   0 on success, a negative errno value otherwise and rte_errno is set.
4560  */
4561 static int
4562 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4563                                     const struct rte_flow_action *action,
4564                                     const uint64_t item_flags,
4565                                     struct rte_flow_error *error)
4566 {
4567         int ret = 0;
4568         uint64_t layer;
4569
4570         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4571         if (!ret) {
4572                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4573                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4574                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4575                 if (!(item_flags & layer))
4576                         return rte_flow_error_set(error, EINVAL,
4577                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4578                                                   NULL,
4579                                                   "no ipv4 item in pattern");
4580         }
4581         return ret;
4582 }
4583
4584 /**
4585  * Validate the modify-header IPv6 address actions.
4586  *
4587  * @param[in] action_flags
4588  *   Holds the actions detected until now.
4589  * @param[in] action
4590  *   Pointer to the modify action.
4591  * @param[in] item_flags
4592  *   Holds the items detected.
4593  * @param[out] error
4594  *   Pointer to error structure.
4595  *
4596  * @return
4597  *   0 on success, a negative errno value otherwise and rte_errno is set.
4598  */
4599 static int
4600 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4601                                     const struct rte_flow_action *action,
4602                                     const uint64_t item_flags,
4603                                     struct rte_flow_error *error)
4604 {
4605         int ret = 0;
4606         uint64_t layer;
4607
4608         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4609         if (!ret) {
4610                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4611                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4612                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4613                 if (!(item_flags & layer))
4614                         return rte_flow_error_set(error, EINVAL,
4615                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4616                                                   NULL,
4617                                                   "no ipv6 item in pattern");
4618         }
4619         return ret;
4620 }
4621
4622 /**
4623  * Validate the modify-header TP actions.
4624  *
4625  * @param[in] action_flags
4626  *   Holds the actions detected until now.
4627  * @param[in] action
4628  *   Pointer to the modify action.
4629  * @param[in] item_flags
4630  *   Holds the items detected.
4631  * @param[out] error
4632  *   Pointer to error structure.
4633  *
4634  * @return
4635  *   0 on success, a negative errno value otherwise and rte_errno is set.
4636  */
4637 static int
4638 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4639                                   const struct rte_flow_action *action,
4640                                   const uint64_t item_flags,
4641                                   struct rte_flow_error *error)
4642 {
4643         int ret = 0;
4644         uint64_t layer;
4645
4646         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4647         if (!ret) {
4648                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4649                                  MLX5_FLOW_LAYER_INNER_L4 :
4650                                  MLX5_FLOW_LAYER_OUTER_L4;
4651                 if (!(item_flags & layer))
4652                         return rte_flow_error_set(error, EINVAL,
4653                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4654                                                   NULL, "no transport layer "
4655                                                   "in pattern");
4656         }
4657         return ret;
4658 }
4659
4660 /**
4661  * Validate the modify-header actions of increment/decrement
4662  * TCP Sequence-number.
4663  *
4664  * @param[in] action_flags
4665  *   Holds the actions detected until now.
4666  * @param[in] action
4667  *   Pointer to the modify action.
4668  * @param[in] item_flags
4669  *   Holds the items detected.
4670  * @param[out] error
4671  *   Pointer to error structure.
4672  *
4673  * @return
4674  *   0 on success, a negative errno value otherwise and rte_errno is set.
4675  */
4676 static int
4677 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4678                                        const struct rte_flow_action *action,
4679                                        const uint64_t item_flags,
4680                                        struct rte_flow_error *error)
4681 {
4682         int ret = 0;
4683         uint64_t layer;
4684
4685         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4686         if (!ret) {
4687                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4688                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4689                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4690                 if (!(item_flags & layer))
4691                         return rte_flow_error_set(error, EINVAL,
4692                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4693                                                   NULL, "no TCP item in"
4694                                                   " pattern");
4695                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4696                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4697                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4698                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4699                         return rte_flow_error_set(error, EINVAL,
4700                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4701                                                   NULL,
4702                                                   "cannot decrease and increase"
4703                                                   " TCP sequence number"
4704                                                   " at the same time");
4705         }
4706         return ret;
4707 }
4708
4709 /**
4710  * Validate the modify-header actions of increment/decrement
4711  * TCP Acknowledgment number.
4712  *
4713  * @param[in] action_flags
4714  *   Holds the actions detected until now.
4715  * @param[in] action
4716  *   Pointer to the modify action.
4717  * @param[in] item_flags
4718  *   Holds the items detected.
4719  * @param[out] error
4720  *   Pointer to error structure.
4721  *
4722  * @return
4723  *   0 on success, a negative errno value otherwise and rte_errno is set.
4724  */
4725 static int
4726 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4727                                        const struct rte_flow_action *action,
4728                                        const uint64_t item_flags,
4729                                        struct rte_flow_error *error)
4730 {
4731         int ret = 0;
4732         uint64_t layer;
4733
4734         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4735         if (!ret) {
4736                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4737                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4738                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4739                 if (!(item_flags & layer))
4740                         return rte_flow_error_set(error, EINVAL,
4741                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4742                                                   NULL, "no TCP item in"
4743                                                   " pattern");
4744                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4745                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4746                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4747                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4748                         return rte_flow_error_set(error, EINVAL,
4749                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4750                                                   NULL,
4751                                                   "cannot decrease and increase"
4752                                                   " TCP acknowledgment number"
4753                                                   " at the same time");
4754         }
4755         return ret;
4756 }
4757
4758 /**
4759  * Validate the modify-header TTL actions.
4760  *
4761  * @param[in] action_flags
4762  *   Holds the actions detected until now.
4763  * @param[in] action
4764  *   Pointer to the modify action.
4765  * @param[in] item_flags
4766  *   Holds the items detected.
4767  * @param[out] error
4768  *   Pointer to error structure.
4769  *
4770  * @return
4771  *   0 on success, a negative errno value otherwise and rte_errno is set.
4772  */
4773 static int
4774 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4775                                    const struct rte_flow_action *action,
4776                                    const uint64_t item_flags,
4777                                    struct rte_flow_error *error)
4778 {
4779         int ret = 0;
4780         uint64_t layer;
4781
4782         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4783         if (!ret) {
4784                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4785                                  MLX5_FLOW_LAYER_INNER_L3 :
4786                                  MLX5_FLOW_LAYER_OUTER_L3;
4787                 if (!(item_flags & layer))
4788                         return rte_flow_error_set(error, EINVAL,
4789                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4790                                                   NULL,
4791                                                   "no IP protocol in pattern");
4792         }
4793         return ret;
4794 }
4795
4796 /**
4797  * Validate the generic modify field actions.
4798  * @param[in] dev
4799  *   Pointer to the rte_eth_dev structure.
4800  * @param[in] action_flags
4801  *   Holds the actions detected until now.
4802  * @param[in] action
4803  *   Pointer to the modify action.
4804  * @param[in] attr
4805  *   Pointer to the flow attributes.
4806  * @param[out] error
4807  *   Pointer to error structure.
4808  *
4809  * @return
4810  *   Number of header fields to modify (0 or more) on success,
4811  *   a negative errno value otherwise and rte_errno is set.
4812  */
4813 static int
4814 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4815                                    const uint64_t action_flags,
4816                                    const struct rte_flow_action *action,
4817                                    const struct rte_flow_attr *attr,
4818                                    struct rte_flow_error *error)
4819 {
4820         int ret = 0;
4821         struct mlx5_priv *priv = dev->data->dev_private;
4822         struct mlx5_dev_config *config = &priv->config;
4823         const struct rte_flow_action_modify_field *action_modify_field =
4824                 action->conf;
4825         uint32_t dst_width = mlx5_flow_item_field_width(config,
4826                                 action_modify_field->dst.field);
4827         uint32_t src_width = mlx5_flow_item_field_width(config,
4828                                 action_modify_field->src.field);
4829
4830         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4831         if (ret)
4832                 return ret;
4833
4834         if (action_modify_field->width == 0)
4835                 return rte_flow_error_set(error, EINVAL,
4836                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4837                                 "no bits are requested to be modified");
4838         else if (action_modify_field->width > dst_width ||
4839                  action_modify_field->width > src_width)
4840                 return rte_flow_error_set(error, EINVAL,
4841                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4842                                 "cannot modify more bits than"
4843                                 " the width of a field");
4844         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4845             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4846                 if ((action_modify_field->dst.offset +
4847                      action_modify_field->width > dst_width) ||
4848                     (action_modify_field->dst.offset % 32))
4849                         return rte_flow_error_set(error, EINVAL,
4850                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4851                                         "destination offset is too big"
4852                                         " or not aligned to 4 bytes");
4853                 if (action_modify_field->dst.level &&
4854                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4855                         return rte_flow_error_set(error, ENOTSUP,
4856                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4857                                         "inner header fields modification"
4858                                         " is not supported");
4859         }
4860         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4861             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4862                 if (!attr->transfer && !attr->group)
4863                         return rte_flow_error_set(error, ENOTSUP,
4864                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4865                                         "modify field action is not"
4866                                         " supported for group 0");
4867                 if ((action_modify_field->src.offset +
4868                      action_modify_field->width > src_width) ||
4869                     (action_modify_field->src.offset % 32))
4870                         return rte_flow_error_set(error, EINVAL,
4871                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4872                                         "source offset is too big"
4873                                         " or not aligned to 4 bytes");
4874                 if (action_modify_field->src.level &&
4875                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4876                         return rte_flow_error_set(error, ENOTSUP,
4877                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4878                                         "inner header fields modification"
4879                                         " is not supported");
4880         }
4881         if ((action_modify_field->dst.field ==
4882              action_modify_field->src.field) &&
4883             (action_modify_field->dst.level ==
4884              action_modify_field->src.level))
4885                 return rte_flow_error_set(error, EINVAL,
4886                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4887                                 "source and destination fields"
4888                                 " cannot be the same");
4889         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4890             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4891                 return rte_flow_error_set(error, EINVAL,
4892                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4893                                 "immediate value or a pointer to it"
4894                                 " cannot be used as a destination");
4895         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4896             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4897                 return rte_flow_error_set(error, ENOTSUP,
4898                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4899                                 "modifications of an arbitrary"
4900                                 " place in a packet is not supported");
4901         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4902             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4903                 return rte_flow_error_set(error, ENOTSUP,
4904                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4905                                 "modifications of the 802.1Q Tag"
4906                                 " Identifier is not supported");
4907         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4908             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4909                 return rte_flow_error_set(error, ENOTSUP,
4910                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4911                                 "modifications of the VXLAN Network"
4912                                 " Identifier is not supported");
4913         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4914             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4915                 return rte_flow_error_set(error, ENOTSUP,
4916                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4917                                 "modifications of the GENEVE Network"
4918                                 " Identifier is not supported");
4919         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4920             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4921             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4922             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4923                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4924                     !mlx5_flow_ext_mreg_supported(dev))
4925                         return rte_flow_error_set(error, ENOTSUP,
4926                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4927                                         "cannot modify mark or metadata without"
4928                                         " extended metadata register support");
4929         }
4930         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4931                 return rte_flow_error_set(error, ENOTSUP,
4932                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4933                                 "add and sub operations"
4934                                 " are not supported");
4935         return (action_modify_field->width / 32) +
4936                !!(action_modify_field->width % 32);
4937 }
4938
4939 /**
4940  * Validate jump action.
4941  *
4942  * @param[in] action
4943  *   Pointer to the jump action.
4944  * @param[in] action_flags
4945  *   Holds the actions detected until now.
4946  * @param[in] attributes
4947  *   Pointer to flow attributes
4948  * @param[in] external
4949  *   Action belongs to flow rule created by request external to PMD.
4950  * @param[out] error
4951  *   Pointer to error structure.
4952  *
4953  * @return
4954  *   0 on success, a negative errno value otherwise and rte_errno is set.
4955  */
4956 static int
4957 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4958                              const struct mlx5_flow_tunnel *tunnel,
4959                              const struct rte_flow_action *action,
4960                              uint64_t action_flags,
4961                              const struct rte_flow_attr *attributes,
4962                              bool external, struct rte_flow_error *error)
4963 {
4964         uint32_t target_group, table;
4965         int ret = 0;
4966         struct flow_grp_info grp_info = {
4967                 .external = !!external,
4968                 .transfer = !!attributes->transfer,
4969                 .fdb_def_rule = 1,
4970                 .std_tbl_fix = 0
4971         };
4972         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4973                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4974                 return rte_flow_error_set(error, EINVAL,
4975                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4976                                           "can't have 2 fate actions in"
4977                                           " same flow");
4978         if (!action->conf)
4979                 return rte_flow_error_set(error, EINVAL,
4980                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4981                                           NULL, "action configuration not set");
4982         target_group =
4983                 ((const struct rte_flow_action_jump *)action->conf)->group;
4984         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4985                                        &grp_info, error);
4986         if (ret)
4987                 return ret;
4988         if (attributes->group == target_group &&
4989             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4990                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4991                 return rte_flow_error_set(error, EINVAL,
4992                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4993                                           "target group must be other than"
4994                                           " the current flow group");
4995         return 0;
4996 }
4997
4998 /*
4999  * Validate the port_id action.
5000  *
5001  * @param[in] dev
5002  *   Pointer to rte_eth_dev structure.
5003  * @param[in] action_flags
5004  *   Bit-fields that holds the actions detected until now.
5005  * @param[in] action
5006  *   Port_id RTE action structure.
5007  * @param[in] attr
5008  *   Attributes of flow that includes this action.
5009  * @param[out] error
5010  *   Pointer to error structure.
5011  *
5012  * @return
5013  *   0 on success, a negative errno value otherwise and rte_errno is set.
5014  */
5015 static int
5016 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5017                                 uint64_t action_flags,
5018                                 const struct rte_flow_action *action,
5019                                 const struct rte_flow_attr *attr,
5020                                 struct rte_flow_error *error)
5021 {
5022         const struct rte_flow_action_port_id *port_id;
5023         struct mlx5_priv *act_priv;
5024         struct mlx5_priv *dev_priv;
5025         uint16_t port;
5026
5027         if (!attr->transfer)
5028                 return rte_flow_error_set(error, ENOTSUP,
5029                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5030                                           NULL,
5031                                           "port id action is valid in transfer"
5032                                           " mode only");
5033         if (!action || !action->conf)
5034                 return rte_flow_error_set(error, ENOTSUP,
5035                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5036                                           NULL,
5037                                           "port id action parameters must be"
5038                                           " specified");
5039         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5040                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5041                 return rte_flow_error_set(error, EINVAL,
5042                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5043                                           "can have only one fate actions in"
5044                                           " a flow");
5045         dev_priv = mlx5_dev_to_eswitch_info(dev);
5046         if (!dev_priv)
5047                 return rte_flow_error_set(error, rte_errno,
5048                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5049                                           NULL,
5050                                           "failed to obtain E-Switch info");
5051         port_id = action->conf;
5052         port = port_id->original ? dev->data->port_id : port_id->id;
5053         act_priv = mlx5_port_to_eswitch_info(port, false);
5054         if (!act_priv)
5055                 return rte_flow_error_set
5056                                 (error, rte_errno,
5057                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
5058                                  "failed to obtain E-Switch port id for port");
5059         if (act_priv->domain_id != dev_priv->domain_id)
5060                 return rte_flow_error_set
5061                                 (error, EINVAL,
5062                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5063                                  "port does not belong to"
5064                                  " E-Switch being configured");
5065         return 0;
5066 }
5067
5068 /**
5069  * Get the maximum number of modify header actions.
5070  *
5071  * @param dev
5072  *   Pointer to rte_eth_dev structure.
5073  * @param root
5074  *   Whether action is on root table.
5075  *
5076  * @return
5077  *   Max number of modify header actions device can support.
5078  */
5079 static inline unsigned int
5080 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5081                               bool root)
5082 {
5083         /*
5084          * There's no way to directly query the max capacity from FW.
5085          * The maximal value on root table should be assumed to be supported.
5086          */
5087         if (!root)
5088                 return MLX5_MAX_MODIFY_NUM;
5089         else
5090                 return MLX5_ROOT_TBL_MODIFY_NUM;
5091 }
5092
5093 /**
5094  * Validate the meter action.
5095  *
5096  * @param[in] dev
5097  *   Pointer to rte_eth_dev structure.
5098  * @param[in] action_flags
5099  *   Bit-fields that holds the actions detected until now.
5100  * @param[in] action
5101  *   Pointer to the meter action.
5102  * @param[in] attr
5103  *   Attributes of flow that includes this action.
5104  * @param[in] port_id_item
5105  *   Pointer to item indicating port id.
5106  * @param[out] error
5107  *   Pointer to error structure.
5108  *
5109  * @return
5110  *   0 on success, a negative errno value otherwise and rte_ernno is set.
5111  */
5112 static int
5113 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5114                                 uint64_t action_flags,
5115                                 const struct rte_flow_action *action,
5116                                 const struct rte_flow_attr *attr,
5117                                 const struct rte_flow_item *port_id_item,
5118                                 bool *def_policy,
5119                                 struct rte_flow_error *error)
5120 {
5121         struct mlx5_priv *priv = dev->data->dev_private;
5122         const struct rte_flow_action_meter *am = action->conf;
5123         struct mlx5_flow_meter_info *fm;
5124         struct mlx5_flow_meter_policy *mtr_policy;
5125         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5126
5127         if (!am)
5128                 return rte_flow_error_set(error, EINVAL,
5129                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5130                                           "meter action conf is NULL");
5131
5132         if (action_flags & MLX5_FLOW_ACTION_METER)
5133                 return rte_flow_error_set(error, ENOTSUP,
5134                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5135                                           "meter chaining not support");
5136         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5137                 return rte_flow_error_set(error, ENOTSUP,
5138                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5139                                           "meter with jump not support");
5140         if (!priv->mtr_en)
5141                 return rte_flow_error_set(error, ENOTSUP,
5142                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5143                                           NULL,
5144                                           "meter action not supported");
5145         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5146         if (!fm)
5147                 return rte_flow_error_set(error, EINVAL,
5148                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5149                                           "Meter not found");
5150         /* aso meter can always be shared by different domains */
5151         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5152             !(fm->transfer == attr->transfer ||
5153               (!fm->ingress && !attr->ingress && attr->egress) ||
5154               (!fm->egress && !attr->egress && attr->ingress)))
5155                 return rte_flow_error_set(error, EINVAL,
5156                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5157                         "Flow attributes domain are either invalid "
5158                         "or have a domain conflict with current "
5159                         "meter attributes");
5160         if (fm->def_policy) {
5161                 if (!((attr->transfer &&
5162                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5163                         (attr->egress &&
5164                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5165                         (attr->ingress &&
5166                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5167                         return rte_flow_error_set(error, EINVAL,
5168                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5169                                           "Flow attributes domain "
5170                                           "have a conflict with current "
5171                                           "meter domain attributes");
5172                 *def_policy = true;
5173         } else {
5174                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5175                                                 fm->policy_id, NULL);
5176                 if (!mtr_policy)
5177                         return rte_flow_error_set(error, EINVAL,
5178                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5179                                           "Invalid policy id for meter ");
5180                 if (!((attr->transfer && mtr_policy->transfer) ||
5181                         (attr->egress && mtr_policy->egress) ||
5182                         (attr->ingress && mtr_policy->ingress)))
5183                         return rte_flow_error_set(error, EINVAL,
5184                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5185                                           "Flow attributes domain "
5186                                           "have a conflict with current "
5187                                           "meter domain attributes");
5188                 if (attr->transfer && mtr_policy->dev) {
5189                         /**
5190                          * When policy has fate action of port_id,
5191                          * the flow should have the same src port as policy.
5192                          */
5193                         struct mlx5_priv *policy_port_priv =
5194                                         mtr_policy->dev->data->dev_private;
5195                         int32_t flow_src_port = priv->representor_id;
5196
5197                         if (port_id_item) {
5198                                 const struct rte_flow_item_port_id *spec =
5199                                                         port_id_item->spec;
5200                                 struct mlx5_priv *port_priv =
5201                                         mlx5_port_to_eswitch_info(spec->id,
5202                                                                   false);
5203                                 if (!port_priv)
5204                                         return rte_flow_error_set(error,
5205                                                 rte_errno,
5206                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5207                                                 spec,
5208                                                 "Failed to get port info.");
5209                                 flow_src_port = port_priv->representor_id;
5210                         }
5211                         if (flow_src_port != policy_port_priv->representor_id)
5212                                 return rte_flow_error_set(error,
5213                                                 rte_errno,
5214                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5215                                                 NULL,
5216                                                 "Flow and meter policy "
5217                                                 "have different src port.");
5218                 }
5219                 *def_policy = false;
5220         }
5221         return 0;
5222 }
5223
5224 /**
5225  * Validate the age action.
5226  *
5227  * @param[in] action_flags
5228  *   Holds the actions detected until now.
5229  * @param[in] action
5230  *   Pointer to the age action.
5231  * @param[in] dev
5232  *   Pointer to the Ethernet device structure.
5233  * @param[out] error
5234  *   Pointer to error structure.
5235  *
5236  * @return
5237  *   0 on success, a negative errno value otherwise and rte_errno is set.
5238  */
5239 static int
5240 flow_dv_validate_action_age(uint64_t action_flags,
5241                             const struct rte_flow_action *action,
5242                             struct rte_eth_dev *dev,
5243                             struct rte_flow_error *error)
5244 {
5245         struct mlx5_priv *priv = dev->data->dev_private;
5246         const struct rte_flow_action_age *age = action->conf;
5247
5248         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5249             !priv->sh->aso_age_mng))
5250                 return rte_flow_error_set(error, ENOTSUP,
5251                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5252                                           NULL,
5253                                           "age action not supported");
5254         if (!(action->conf))
5255                 return rte_flow_error_set(error, EINVAL,
5256                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5257                                           "configuration cannot be null");
5258         if (!(age->timeout))
5259                 return rte_flow_error_set(error, EINVAL,
5260                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5261                                           "invalid timeout value 0");
5262         if (action_flags & MLX5_FLOW_ACTION_AGE)
5263                 return rte_flow_error_set(error, EINVAL,
5264                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5265                                           "duplicate age actions set");
5266         return 0;
5267 }
5268
5269 /**
5270  * Validate the modify-header IPv4 DSCP actions.
5271  *
5272  * @param[in] action_flags
5273  *   Holds the actions detected until now.
5274  * @param[in] action
5275  *   Pointer to the modify action.
5276  * @param[in] item_flags
5277  *   Holds the items detected.
5278  * @param[out] error
5279  *   Pointer to error structure.
5280  *
5281  * @return
5282  *   0 on success, a negative errno value otherwise and rte_errno is set.
5283  */
5284 static int
5285 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5286                                          const struct rte_flow_action *action,
5287                                          const uint64_t item_flags,
5288                                          struct rte_flow_error *error)
5289 {
5290         int ret = 0;
5291
5292         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5293         if (!ret) {
5294                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5295                         return rte_flow_error_set(error, EINVAL,
5296                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5297                                                   NULL,
5298                                                   "no ipv4 item in pattern");
5299         }
5300         return ret;
5301 }
5302
5303 /**
5304  * Validate the modify-header IPv6 DSCP actions.
5305  *
5306  * @param[in] action_flags
5307  *   Holds the actions detected until now.
5308  * @param[in] action
5309  *   Pointer to the modify action.
5310  * @param[in] item_flags
5311  *   Holds the items detected.
5312  * @param[out] error
5313  *   Pointer to error structure.
5314  *
5315  * @return
5316  *   0 on success, a negative errno value otherwise and rte_errno is set.
5317  */
5318 static int
5319 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5320                                          const struct rte_flow_action *action,
5321                                          const uint64_t item_flags,
5322                                          struct rte_flow_error *error)
5323 {
5324         int ret = 0;
5325
5326         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5327         if (!ret) {
5328                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5329                         return rte_flow_error_set(error, EINVAL,
5330                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5331                                                   NULL,
5332                                                   "no ipv6 item in pattern");
5333         }
5334         return ret;
5335 }
5336
5337 /**
5338  * Match modify-header resource.
5339  *
5340  * @param list
5341  *   Pointer to the hash list.
5342  * @param entry
5343  *   Pointer to exist resource entry object.
5344  * @param key
5345  *   Key of the new entry.
5346  * @param ctx
5347  *   Pointer to new modify-header resource.
5348  *
5349  * @return
5350  *   0 on matching, non-zero otherwise.
5351  */
5352 int
5353 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5354                         struct mlx5_hlist_entry *entry,
5355                         uint64_t key __rte_unused, void *cb_ctx)
5356 {
5357         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5358         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5359         struct mlx5_flow_dv_modify_hdr_resource *resource =
5360                         container_of(entry, typeof(*resource), entry);
5361         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5362
5363         key_len += ref->actions_num * sizeof(ref->actions[0]);
5364         return ref->actions_num != resource->actions_num ||
5365                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5366 }
5367
/**
 * Allocate a modify-header resource and create its device action.
 *
 * Copies the key and action data from the reference in @p cb_ctx into a
 * freshly allocated entry, selects the DR domain from the table type and
 * creates the modify-header action object.
 *
 * @return
 *   Pointer to the new hash-list entry, NULL on failure (error is set in
 *   the context's rte_flow_error).
 */
struct mlx5_hlist_entry *
flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5dv_dr_domain *ns;
	struct mlx5_flow_dv_modify_hdr_resource *entry;
	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
	int ret;
	/* Trailing variable-length action array size. */
	uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
	/* Fixed key region: from ft_type to the end of the struct. */
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);

	/* Allocate the entry with room for the trailing action data. */
	entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
			    SOCKET_ID_ANY);
	if (!entry) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		return NULL;
	}
	/* Copy key and actions in one shot; they are contiguous in ref. */
	rte_memcpy(&entry->ft_type,
		   RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
		   key_len + data_len);
	/* Pick the DR domain matching the flow table type. */
	if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_domain;
	else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_domain;
	else
		ns = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_action_modify_header
					(sh->ctx, ns, entry,
					 data_len, &entry->action);
	if (ret) {
		mlx5_free(entry);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create modification action");
		return NULL;
	}
	return &entry->entry;
}
5410
5411 /**
5412  * Validate the sample action.
5413  *
5414  * @param[in, out] action_flags
5415  *   Holds the actions detected until now.
5416  * @param[in] action
5417  *   Pointer to the sample action.
5418  * @param[in] dev
5419  *   Pointer to the Ethernet device structure.
5420  * @param[in] attr
5421  *   Attributes of flow that includes this action.
5422  * @param[in] item_flags
5423  *   Holds the items detected.
5424  * @param[in] rss
5425  *   Pointer to the RSS action.
5426  * @param[out] sample_rss
5427  *   Pointer to the RSS action in sample action list.
5428  * @param[out] count
5429  *   Pointer to the COUNT action in sample action list.
5430  * @param[out] fdb_mirror_limit
5431  *   Pointer to the FDB mirror limitation flag.
5432  * @param[out] error
5433  *   Pointer to error structure.
5434  *
5435  * @return
5436  *   0 on success, a negative errno value otherwise and rte_errno is set.
5437  */
5438 static int
5439 flow_dv_validate_action_sample(uint64_t *action_flags,
5440                                const struct rte_flow_action *action,
5441                                struct rte_eth_dev *dev,
5442                                const struct rte_flow_attr *attr,
5443                                uint64_t item_flags,
5444                                const struct rte_flow_action_rss *rss,
5445                                const struct rte_flow_action_rss **sample_rss,
5446                                const struct rte_flow_action_count **count,
5447                                int *fdb_mirror_limit,
5448                                struct rte_flow_error *error)
5449 {
5450         struct mlx5_priv *priv = dev->data->dev_private;
5451         struct mlx5_dev_config *dev_conf = &priv->config;
5452         const struct rte_flow_action_sample *sample = action->conf;
5453         const struct rte_flow_action *act;
5454         uint64_t sub_action_flags = 0;
5455         uint16_t queue_index = 0xFFFF;
5456         int actions_n = 0;
5457         int ret;
5458
5459         if (!sample)
5460                 return rte_flow_error_set(error, EINVAL,
5461                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5462                                           "configuration cannot be NULL");
5463         if (sample->ratio == 0)
5464                 return rte_flow_error_set(error, EINVAL,
5465                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5466                                           "ratio value starts from 1");
5467         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5468                 return rte_flow_error_set(error, ENOTSUP,
5469                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5470                                           NULL,
5471                                           "sample action not supported");
5472         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5473                 return rte_flow_error_set(error, EINVAL,
5474                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5475                                           "Multiple sample actions not "
5476                                           "supported");
5477         if (*action_flags & MLX5_FLOW_ACTION_METER)
5478                 return rte_flow_error_set(error, EINVAL,
5479                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5480                                           "wrong action order, meter should "
5481                                           "be after sample action");
5482         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5483                 return rte_flow_error_set(error, EINVAL,
5484                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5485                                           "wrong action order, jump should "
5486                                           "be after sample action");
5487         act = sample->actions;
5488         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5489                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5490                         return rte_flow_error_set(error, ENOTSUP,
5491                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5492                                                   act, "too many actions");
5493                 switch (act->type) {
5494                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5495                         ret = mlx5_flow_validate_action_queue(act,
5496                                                               sub_action_flags,
5497                                                               dev,
5498                                                               attr, error);
5499                         if (ret < 0)
5500                                 return ret;
5501                         queue_index = ((const struct rte_flow_action_queue *)
5502                                                         (act->conf))->index;
5503                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5504                         ++actions_n;
5505                         break;
5506                 case RTE_FLOW_ACTION_TYPE_RSS:
5507                         *sample_rss = act->conf;
5508                         ret = mlx5_flow_validate_action_rss(act,
5509                                                             sub_action_flags,
5510                                                             dev, attr,
5511                                                             item_flags,
5512                                                             error);
5513                         if (ret < 0)
5514                                 return ret;
5515                         if (rss && *sample_rss &&
5516                             ((*sample_rss)->level != rss->level ||
5517                             (*sample_rss)->types != rss->types))
5518                                 return rte_flow_error_set(error, ENOTSUP,
5519                                         RTE_FLOW_ERROR_TYPE_ACTION,
5520                                         NULL,
5521                                         "Can't use the different RSS types "
5522                                         "or level in the same flow");
5523                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5524                                 queue_index = (*sample_rss)->queue[0];
5525                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5526                         ++actions_n;
5527                         break;
5528                 case RTE_FLOW_ACTION_TYPE_MARK:
5529                         ret = flow_dv_validate_action_mark(dev, act,
5530                                                            sub_action_flags,
5531                                                            attr, error);
5532                         if (ret < 0)
5533                                 return ret;
5534                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5535                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5536                                                 MLX5_FLOW_ACTION_MARK_EXT;
5537                         else
5538                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5539                         ++actions_n;
5540                         break;
5541                 case RTE_FLOW_ACTION_TYPE_COUNT:
5542                         ret = flow_dv_validate_action_count
5543                                 (dev, is_shared_action_count(act),
5544                                  *action_flags | sub_action_flags,
5545                                  error);
5546                         if (ret < 0)
5547                                 return ret;
5548                         *count = act->conf;
5549                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5550                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5551                         ++actions_n;
5552                         break;
5553                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5554                         ret = flow_dv_validate_action_port_id(dev,
5555                                                               sub_action_flags,
5556                                                               act,
5557                                                               attr,
5558                                                               error);
5559                         if (ret)
5560                                 return ret;
5561                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5562                         ++actions_n;
5563                         break;
5564                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5565                         ret = flow_dv_validate_action_raw_encap_decap
5566                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5567                                  &actions_n, action, item_flags, error);
5568                         if (ret < 0)
5569                                 return ret;
5570                         ++actions_n;
5571                         break;
5572                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5573                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5574                         ret = flow_dv_validate_action_l2_encap(dev,
5575                                                                sub_action_flags,
5576                                                                act, attr,
5577                                                                error);
5578                         if (ret < 0)
5579                                 return ret;
5580                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5581                         ++actions_n;
5582                         break;
5583                 default:
5584                         return rte_flow_error_set(error, ENOTSUP,
5585                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5586                                                   NULL,
5587                                                   "Doesn't support optional "
5588                                                   "action");
5589                 }
5590         }
5591         if (attr->ingress && !attr->transfer) {
5592                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5593                                           MLX5_FLOW_ACTION_RSS)))
5594                         return rte_flow_error_set(error, EINVAL,
5595                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5596                                                   NULL,
5597                                                   "Ingress must has a dest "
5598                                                   "QUEUE for Sample");
5599         } else if (attr->egress && !attr->transfer) {
5600                 return rte_flow_error_set(error, ENOTSUP,
5601                                           RTE_FLOW_ERROR_TYPE_ACTION,
5602                                           NULL,
5603                                           "Sample Only support Ingress "
5604                                           "or E-Switch");
5605         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5606                 MLX5_ASSERT(attr->transfer);
5607                 if (sample->ratio > 1)
5608                         return rte_flow_error_set(error, ENOTSUP,
5609                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5610                                                   NULL,
5611                                                   "E-Switch doesn't support "
5612                                                   "any optional action "
5613                                                   "for sampling");
5614                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5615                         return rte_flow_error_set(error, ENOTSUP,
5616                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5617                                                   NULL,
5618                                                   "unsupported action QUEUE");
5619                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5620                         return rte_flow_error_set(error, ENOTSUP,
5621                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5622                                                   NULL,
5623                                                   "unsupported action QUEUE");
5624                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5625                         return rte_flow_error_set(error, EINVAL,
5626                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5627                                                   NULL,
5628                                                   "E-Switch must has a dest "
5629                                                   "port for mirroring");
5630                 if (!priv->config.hca_attr.reg_c_preserve &&
5631                      priv->representor_id != UINT16_MAX)
5632                         *fdb_mirror_limit = 1;
5633         }
5634         /* Continue validation for Xcap actions.*/
5635         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5636             (queue_index == 0xFFFF ||
5637              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5638                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5639                      MLX5_FLOW_XCAP_ACTIONS)
5640                         return rte_flow_error_set(error, ENOTSUP,
5641                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5642                                                   NULL, "encap and decap "
5643                                                   "combination aren't "
5644                                                   "supported");
5645                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5646                                                         MLX5_FLOW_ACTION_ENCAP))
5647                         return rte_flow_error_set(error, ENOTSUP,
5648                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5649                                                   NULL, "encap is not supported"
5650                                                   " for ingress traffic");
5651         }
5652         return 0;
5653 }
5654
5655 /**
5656  * Find existing modify-header resource or create and register a new one.
5657  *
5658  * @param dev[in, out]
5659  *   Pointer to rte_eth_dev structure.
5660  * @param[in, out] resource
5661  *   Pointer to modify-header resource.
5662  * @parm[in, out] dev_flow
5663  *   Pointer to the dev_flow.
5664  * @param[out] error
5665  *   pointer to error structure.
5666  *
5667  * @return
5668  *   0 on success otherwise -errno and errno is set.
5669  */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	/*
	 * The lookup key covers the struct tail starting at ft_type plus the
	 * variable number of modify actions actually filled in, so two
	 * resources with identical content hash to the same key.
	 */
	uint32_t key_len = sizeof(*resource) -
			   offsetof(typeof(*resource), ft_type) +
			   resource->actions_num * sizeof(resource->actions[0]);
	struct mlx5_hlist_entry *entry;
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = resource,
	};
	uint64_t key64;

	/* Group 0 flows go to the root table, which has a lower action limit. */
	resource->root = !dev_flow->dv.group;
	if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
								resource->root))
		return rte_flow_error_set(error, EOVERFLOW,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many modify header items");
	/* Raw checksum over the key region serves as the hash-list key. */
	key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
	entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
	if (!entry)
		return -rte_errno;
	/* Either an existing matching resource or a freshly created one. */
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->handle->dvh.modify_hdr = resource;
	return 0;
}
5703
5704 /**
5705  * Get DV flow counter by index.
5706  *
5707  * @param[in] dev
5708  *   Pointer to the Ethernet device structure.
5709  * @param[in] idx
5710  *   mlx5 flow counter index in the container.
5711  * @param[out] ppool
5712  *   mlx5 flow counter pool in the container.
5713  *
5714  * @return
5715  *   Pointer to the counter, NULL otherwise.
5716  */
5717 static struct mlx5_flow_counter *
5718 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5719                            uint32_t idx,
5720                            struct mlx5_flow_counter_pool **ppool)
5721 {
5722         struct mlx5_priv *priv = dev->data->dev_private;
5723         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5724         struct mlx5_flow_counter_pool *pool;
5725
5726         /* Decrease to original index and clear shared bit. */
5727         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5728         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5729         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5730         MLX5_ASSERT(pool);
5731         if (ppool)
5732                 *ppool = pool;
5733         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5734 }
5735
5736 /**
5737  * Check the devx counter belongs to the pool.
5738  *
5739  * @param[in] pool
5740  *   Pointer to the counter pool.
5741  * @param[in] id
5742  *   The counter devx ID.
5743  *
5744  * @return
5745  *   True if counter belongs to the pool, false otherwise.
5746  */
5747 static bool
5748 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5749 {
5750         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5751                    MLX5_COUNTERS_PER_POOL;
5752
5753         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5754                 return true;
5755         return false;
5756 }
5757
5758 /**
5759  * Get a pool by devx counter ID.
5760  *
5761  * @param[in] cmng
5762  *   Pointer to the counter management.
5763  * @param[in] id
5764  *   The counter devx ID.
5765  *
5766  * @return
5767  *   The counter pool pointer if exists, NULL otherwise,
5768  */
5769 static struct mlx5_flow_counter_pool *
5770 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5771 {
5772         uint32_t i;
5773         struct mlx5_flow_counter_pool *pool = NULL;
5774
5775         rte_spinlock_lock(&cmng->pool_update_sl);
5776         /* Check last used pool. */
5777         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5778             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5779                 pool = cmng->pools[cmng->last_pool_idx];
5780                 goto out;
5781         }
5782         /* ID out of range means no suitable pool in the container. */
5783         if (id > cmng->max_id || id < cmng->min_id)
5784                 goto out;
5785         /*
5786          * Find the pool from the end of the container, since mostly counter
5787          * ID is sequence increasing, and the last pool should be the needed
5788          * one.
5789          */
5790         i = cmng->n_valid;
5791         while (i--) {
5792                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5793
5794                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5795                         pool = pool_tmp;
5796                         break;
5797                 }
5798         }
5799 out:
5800         rte_spinlock_unlock(&cmng->pool_update_sl);
5801         return pool;
5802 }
5803
5804 /**
5805  * Resize a counter container.
5806  *
5807  * @param[in] dev
5808  *   Pointer to the Ethernet device structure.
5809  *
5810  * @return
5811  *   0 on success, otherwise negative errno value and rte_errno is set.
5812  */
5813 static int
5814 flow_dv_container_resize(struct rte_eth_dev *dev)
5815 {
5816         struct mlx5_priv *priv = dev->data->dev_private;
5817         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5818         void *old_pools = cmng->pools;
5819         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5820         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5821         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5822
5823         if (!pools) {
5824                 rte_errno = ENOMEM;
5825                 return -ENOMEM;
5826         }
5827         if (old_pools)
5828                 memcpy(pools, old_pools, cmng->n *
5829                                        sizeof(struct mlx5_flow_counter_pool *));
5830         cmng->n = resize;
5831         cmng->pools = pools;
5832         if (old_pools)
5833                 mlx5_free(old_pools);
5834         return 0;
5835 }
5836
5837 /**
5838  * Query a devx flow counter.
5839  *
5840  * @param[in] dev
5841  *   Pointer to the Ethernet device structure.
5842  * @param[in] counter
5843  *   Index to the flow counter.
5844  * @param[out] pkts
5845  *   The statistics value of packets.
5846  * @param[out] bytes
5847  *   The statistics value of bytes.
5848  *
5849  * @return
5850  *   0 on success, otherwise a negative errno value and rte_errno is set.
5851  */
5852 static inline int
5853 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5854                      uint64_t *bytes)
5855 {
5856         struct mlx5_priv *priv = dev->data->dev_private;
5857         struct mlx5_flow_counter_pool *pool = NULL;
5858         struct mlx5_flow_counter *cnt;
5859         int offset;
5860
5861         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5862         MLX5_ASSERT(pool);
5863         if (priv->sh->cmng.counter_fallback)
5864                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5865                                         0, pkts, bytes, 0, NULL, NULL, 0);
5866         rte_spinlock_lock(&pool->sl);
5867         if (!pool->raw) {
5868                 *pkts = 0;
5869                 *bytes = 0;
5870         } else {
5871                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5872                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5873                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5874         }
5875         rte_spinlock_unlock(&pool->sl);
5876         return 0;
5877 }
5878
5879 /**
5880  * Create and initialize a new counter pool.
5881  *
5882  * @param[in] dev
5883  *   Pointer to the Ethernet device structure.
5884  * @param[out] dcs
5885  *   The devX counter handle.
5886  * @param[in] age
5887  *   Whether the pool is for counter that was allocated for aging.
5888  * @param[in/out] cont_cur
5889  *   Pointer to the container pointer, it will be update in pool resize.
5890  *
5891  * @return
5892  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
5893  */
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t size = sizeof(*pool);

	/* Counter entries follow the header; age entries follow the counters. */
	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->raw = NULL;
	pool->is_aged = !!age;
	pool->query_gen = 0;
	pool->min_dcs = dcs;
	rte_spinlock_init(&pool->sl);
	rte_spinlock_init(&pool->csl);
	TAILQ_INIT(&pool->counters[0]);
	TAILQ_INIT(&pool->counters[1]);
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	/* Publish the pool into the container under the update lock. */
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool->index = cmng->n_valid;
	/* Container full: grow it before taking the next slot. */
	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
		mlx5_free(pool);
		rte_spinlock_unlock(&cmng->pool_update_sl);
		return NULL;
	}
	cmng->pools[pool->index] = pool;
	cmng->n_valid++;
	if (unlikely(fallback)) {
		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);

		/* Track the devx counter ID range to speed up pool lookup. */
		if (base < cmng->min_id)
			cmng->min_id = base;
		if (base > cmng->max_id)
			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
		cmng->last_pool_idx = pool->index;
	}
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
5941
5942 /**
5943  * Prepare a new counter and/or a new counter pool.
5944  *
5945  * @param[in] dev
5946  *   Pointer to the Ethernet device structure.
5947  * @param[out] cnt_free
5948  *   Where to put the pointer of a new counter.
5949  * @param[in] age
5950  *   Whether the pool is for counter that was allocated for aging.
5951  *
5952  * @return
5953  *   The counter pool pointer and @p cnt_free is set on success,
5954  *   NULL otherwise and rte_errno is set.
5955  */
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_counters tmp_tq;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t i;

	if (fallback) {
		/* bulk_bitmap must be 0 for single counter allocation. */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
		if (!dcs)
			return NULL;
		/* The counter may land in an already existing pool. */
		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
		if (!pool) {
			pool = flow_dv_pool_create(dev, dcs, age);
			if (!pool) {
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		}
		/* Slot within the pool is determined by the devx ID. */
		i = dcs->id % MLX5_COUNTERS_PER_POOL;
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		cnt->dcs_when_free = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/* Bulk allocation: 0x4 requests a full pool's worth of counters. */
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, age);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/*
	 * Stage counters 1..N-1 on a private list first, then splice them
	 * onto the shared free list in one operation under the lock.
	 * Counter 0 is handed back to the caller directly.
	 */
	TAILQ_INIT(&tmp_tq);
	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
	}
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
	(*cnt_free)->pool = pool;
	return pool;
}
6015
6016 /**
6017  * Allocate a flow counter.
6018  *
6019  * @param[in] dev
6020  *   Pointer to the Ethernet device structure.
6021  * @param[in] age
6022  *   Whether the counter was allocated for aging.
6023  *
6024  * @return
6025  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6026  */
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	bool fallback = priv->sh->cmng.counter_fallback;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	uint32_t cnt_idx;

	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get free counters from container. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
	if (cnt_free)
		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Free list empty: allocate a new pool (and possibly new devx objs). */
	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
		goto err;
	pool = cnt_free->pool;
	if (fallback)
		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only in the first time usage. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;
		int ret;

		if (!fallback) {
			/* Bulk mode: index into the pool's shared devx obj. */
			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
			dcs = pool->min_dcs;
		} else {
			/* Fallback mode: each counter has its own devx obj. */
			offset = 0;
			dcs = cnt_free->dcs_when_free;
		}
		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
							    &cnt_free->action);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
	/* Update the counter reset values. */
	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
				 &cnt_free->bytes))
		goto err;
	if (!fallback && !priv->sh->cmng.query_thread_on)
		/* Start the asynchronous batch query by the host thread. */
		mlx5_set_query_alarm(priv->sh);
	/*
	 * When the count action isn't shared (by ID), shared_info field is
	 * used for indirect action API's refcnt.
	 * When the counter action is not shared neither by ID nor by indirect
	 * action API, shared info must be 1.
	 */
	cnt_free->shared_info.refcnt = 1;
	return cnt_idx;
err:
	/* Return the counter to the free list on any failure. */
	if (cnt_free) {
		cnt_free->pool = pool;
		if (fallback)
			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
		rte_spinlock_lock(&cmng->csl[cnt_type]);
		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
		rte_spinlock_unlock(&cmng->csl[cnt_type]);
	}
	return 0;
}
6102
6103 /**
6104  * Allocate a shared flow counter.
6105  *
6106  * @param[in] ctx
6107  *   Pointer to the shared counter configuration.
6108  * @param[in] data
6109  *   Pointer to save the allocated counter index.
6110  *
6111  * @return
6112  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6113  */
6114
6115 static int32_t
6116 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
6117 {
6118         struct mlx5_shared_counter_conf *conf = ctx;
6119         struct rte_eth_dev *dev = conf->dev;
6120         struct mlx5_flow_counter *cnt;
6121
6122         data->dword = flow_dv_counter_alloc(dev, 0);
6123         data->dword |= MLX5_CNT_SHARED_OFFSET;
6124         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6125         cnt->shared_info.id = conf->id;
6126         return 0;
6127 }
6128
6129 /**
6130  * Get a shared flow counter.
6131  *
6132  * @param[in] dev
6133  *   Pointer to the Ethernet device structure.
6134  * @param[in] id
6135  *   Counter identifier.
6136  *
6137  * @return
6138  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6139  */
6140 static uint32_t
6141 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6142 {
6143         struct mlx5_priv *priv = dev->data->dev_private;
6144         struct mlx5_shared_counter_conf conf = {
6145                 .dev = dev,
6146                 .id = id,
6147         };
6148         union mlx5_l3t_data data = {
6149                 .dword = 0,
6150         };
6151
6152         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6153                                flow_dv_counter_alloc_shared_cb, &conf);
6154         return data.dword;
6155 }
6156
6157 /**
6158  * Get age param from counter index.
6159  *
6160  * @param[in] dev
6161  *   Pointer to the Ethernet device structure.
6162  * @param[in] counter
6163  *   Index to the counter handler.
6164  *
6165  * @return
6166  *   The aging parameter specified for the counter index.
6167  */
6168 static struct mlx5_age_param*
6169 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6170                                 uint32_t counter)
6171 {
6172         struct mlx5_flow_counter *cnt;
6173         struct mlx5_flow_counter_pool *pool = NULL;
6174
6175         flow_dv_counter_get_by_idx(dev, counter, &pool);
6176         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6177         cnt = MLX5_POOL_GET_CNT(pool, counter);
6178         return MLX5_CNT_TO_AGE(cnt);
6179 }
6180
6181 /**
6182  * Remove a flow counter from aged counter list.
6183  *
6184  * @param[in] dev
6185  *   Pointer to the Ethernet device structure.
6186  * @param[in] counter
6187  *   Index to the counter handler.
6188  * @param[in] cnt
6189  *   Pointer to the counter handler.
6190  */
6191 static void
6192 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6193                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6194 {
6195         struct mlx5_age_info *age_info;
6196         struct mlx5_age_param *age_param;
6197         struct mlx5_priv *priv = dev->data->dev_private;
6198         uint16_t expected = AGE_CANDIDATE;
6199
6200         age_info = GET_PORT_AGE_INFO(priv);
6201         age_param = flow_dv_counter_idx_get_age(dev, counter);
6202         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6203                                          AGE_FREE, false, __ATOMIC_RELAXED,
6204                                          __ATOMIC_RELAXED)) {
6205                 /**
6206                  * We need the lock even it is age timeout,
6207                  * since counter may still in process.
6208                  */
6209                 rte_spinlock_lock(&age_info->aged_sl);
6210                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6211                 rte_spinlock_unlock(&age_info->aged_sl);
6212                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6213         }
6214 }
6215
/**
 * Release a flow counter.
 *
 * Shared counters (legacy by-ID or indirect-action) are only released when
 * the last reference drops; otherwise the counter is returned either to its
 * pool free list (normal mode) or to the global per-type list (fallback).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler. Index 0 means "no counter" and is ignored.
 */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type;

	if (!counter)
		return;
	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	if (pool->is_aged) {
		/* Detach from the aged list before the counter is reused. */
		flow_dv_counter_remove_from_age(dev, counter, cnt);
	} else {
		/*
		 * If the counter action is shared by ID, the l3t_clear_entry
		 * function reduces its references counter. If after the
		 * reduction the action is still referenced, the function
		 * returns here and does not release it.
		 */
		if (IS_LEGACY_SHARED_CNT(counter) &&
		    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
					 cnt->shared_info.id))
			return;
		/*
		 * If the counter action is shared by indirect action API,
		 * the atomic function reduces its references counter.
		 * If after the reduction the action is still referenced, the
		 * function returns here and does not release it.
		 * When the counter action is not shared neither by ID nor by
		 * indirect action API, shared info is 1 before the reduction,
		 * so this condition is failed and function doesn't return here.
		 */
		if (!IS_LEGACY_SHARED_CNT(counter) &&
		    __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
				       __ATOMIC_RELAXED))
			return;
	}
	cnt->pool = pool;
	/*
	 * Put the counter back to list to be updated in none fallback mode.
	 * Currently, we are using two list alternately, while one is in query,
	 * add the freed counter to the other list based on the pool query_gen
	 * value. After query finishes, add counter the list to the global
	 * container counter list. The list changes while query starts. In
	 * this case, lock will not be needed as query callback and release
	 * function both operate with the different list.
	 */
	if (!priv->sh->cmng.counter_fallback) {
		rte_spinlock_lock(&pool->csl);
		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
		rte_spinlock_unlock(&pool->csl);
	} else {
		/* Fallback mode: counters live on global per-type lists. */
		cnt->dcs_when_free = cnt->dcs_when_active;
		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
					   MLX5_COUNTER_TYPE_ORIGIN;
		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
				  cnt, next);
		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
	}
}
6287
6288 /**
6289  * Resize a meter id container.
6290  *
6291  * @param[in] dev
6292  *   Pointer to the Ethernet device structure.
6293  *
6294  * @return
6295  *   0 on success, otherwise negative errno value and rte_errno is set.
6296  */
6297 static int
6298 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6299 {
6300         struct mlx5_priv *priv = dev->data->dev_private;
6301         struct mlx5_aso_mtr_pools_mng *pools_mng =
6302                                 &priv->sh->mtrmng->pools_mng;
6303         void *old_pools = pools_mng->pools;
6304         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6305         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6306         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6307
6308         if (!pools) {
6309                 rte_errno = ENOMEM;
6310                 return -ENOMEM;
6311         }
6312         if (!pools_mng->n)
6313                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6314                         mlx5_free(pools);
6315                         return -ENOMEM;
6316                 }
6317         if (old_pools)
6318                 memcpy(pools, old_pools, pools_mng->n *
6319                                        sizeof(struct mlx5_aso_mtr_pool *));
6320         pools_mng->n = resize;
6321         pools_mng->pools = pools;
6322         if (old_pools)
6323                 mlx5_free(old_pools);
6324         return 0;
6325 }
6326
6327 /**
6328  * Prepare a new meter and/or a new meter pool.
6329  *
6330  * @param[in] dev
6331  *   Pointer to the Ethernet device structure.
6332  * @param[out] mtr_free
6333  *   Where to put the pointer of a new meter.g.
6334  *
6335  * @return
6336  *   The meter pool pointer and @mtr_free is set on success,
6337  *   NULL otherwise and rte_errno is set.
6338  */
6339 static struct mlx5_aso_mtr_pool *
6340 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6341                              struct mlx5_aso_mtr **mtr_free)
6342 {
6343         struct mlx5_priv *priv = dev->data->dev_private;
6344         struct mlx5_aso_mtr_pools_mng *pools_mng =
6345                                 &priv->sh->mtrmng->pools_mng;
6346         struct mlx5_aso_mtr_pool *pool = NULL;
6347         struct mlx5_devx_obj *dcs = NULL;
6348         uint32_t i;
6349         uint32_t log_obj_size;
6350
6351         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6352         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6353                         priv->sh->pdn, log_obj_size);
6354         if (!dcs) {
6355                 rte_errno = ENODATA;
6356                 return NULL;
6357         }
6358         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6359         if (!pool) {
6360                 rte_errno = ENOMEM;
6361                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6362                 return NULL;
6363         }
6364         pool->devx_obj = dcs;
6365         pool->index = pools_mng->n_valid;
6366         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6367                 mlx5_free(pool);
6368                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6369                 return NULL;
6370         }
6371         pools_mng->pools[pool->index] = pool;
6372         pools_mng->n_valid++;
6373         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6374                 pool->mtrs[i].offset = i;
6375                 LIST_INSERT_HEAD(&pools_mng->meters,
6376                                                 &pool->mtrs[i], next);
6377         }
6378         pool->mtrs[0].offset = 0;
6379         *mtr_free = &pool->mtrs[0];
6380         return pool;
6381 }
6382
6383 /**
6384  * Release a flow meter into pool.
6385  *
6386  * @param[in] dev
6387  *   Pointer to the Ethernet device structure.
6388  * @param[in] mtr_idx
6389  *   Index to aso flow meter.
6390  */
6391 static void
6392 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6393 {
6394         struct mlx5_priv *priv = dev->data->dev_private;
6395         struct mlx5_aso_mtr_pools_mng *pools_mng =
6396                                 &priv->sh->mtrmng->pools_mng;
6397         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6398
6399         MLX5_ASSERT(aso_mtr);
6400         rte_spinlock_lock(&pools_mng->mtrsl);
6401         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6402         aso_mtr->state = ASO_METER_FREE;
6403         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6404         rte_spinlock_unlock(&pools_mng->mtrsl);
6405 }
6406
/**
 * Allocate an ASO flow meter.
 *
 * Takes a meter from the free list, creating a whole new pool when the list
 * is empty, and lazily creates the DR ASO meter action for it.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Index to aso flow meter on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_mtr_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr *mtr_free = NULL;
	struct mlx5_aso_mtr_pools_mng *pools_mng =
				&priv->sh->mtrmng->pools_mng;
	struct mlx5_aso_mtr_pool *pool;
	uint32_t mtr_idx = 0;

	if (!priv->config.devx) {
		/* ASO meters require DevX support. */
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Allocate the flow meter memory. */
	/* Get free meters from management. */
	rte_spinlock_lock(&pools_mng->mtrsl);
	mtr_free = LIST_FIRST(&pools_mng->meters);
	if (mtr_free)
		LIST_REMOVE(mtr_free, next);
	/* Free list empty: create a new pool (still under the lock). */
	if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
		rte_spinlock_unlock(&pools_mng->mtrsl);
		return 0;
	}
	mtr_free->state = ASO_METER_WAIT;
	rte_spinlock_unlock(&pools_mng->mtrsl);
	/* Recover the owning pool from the meter's offset inside it. */
	pool = container_of(mtr_free,
			struct mlx5_aso_mtr_pool,
			mtrs[mtr_free->offset]);
	mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
	if (!mtr_free->fm.meter_action) {
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
		struct rte_flow_error error;
		uint8_t reg_id;

		reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
		mtr_free->fm.meter_action =
			mlx5_glue->dv_create_flow_action_aso
						(priv->sh->rx_domain,
						 pool->devx_obj->obj,
						 mtr_free->offset,
						 (1 << MLX5_FLOW_COLOR_GREEN),
						 reg_id - REG_C_0);
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
		/* Without ASO action support the meter is unusable. */
		if (!mtr_free->fm.meter_action) {
			flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
			return 0;
		}
	}
	return mtr_idx;
}
6467
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Tunnel offload context, or NULL.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[in] grp_info
 *   Group translation info (used only when DR is available).
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 0 on success and non root table.
 *   - 1 on success and root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct mlx5_flow_tunnel *tunnel,
			    const struct rte_flow_attr *attributes,
			    const struct flow_grp_info *grp_info,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
	int ret = 0;

#ifndef HAVE_MLX5DV_DR
	/* Without DR only the root table (group 0) exists. */
	RTE_SET_USED(tunnel);
	RTE_SET_USED(grp_info);
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#else
	uint32_t table = 0;

	/* Map the rte_flow group to a driver table index. */
	ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
				       grp_info, error);
	if (ret)
		return ret;
	if (!table)
		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
#endif
	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
	    attributes->priority > lowest_priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer) {
		if (!priv->config.dv_esw_en)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (!(priv->representor || priv->master))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "E-Switch configuration can only be"
				 " done by a master or a representor device");
		if (attributes->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
	}
	/* Exactly one direction must be selected. */
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return ret;
}
6545
6546 static uint16_t
6547 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6548                           const struct rte_flow_item *end)
6549 {
6550         const struct rte_flow_item *item = *head;
6551         uint16_t l3_protocol;
6552
6553         for (; item != end; item++) {
6554                 switch (item->type) {
6555                 default:
6556                         break;
6557                 case RTE_FLOW_ITEM_TYPE_IPV4:
6558                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6559                         goto l3_ok;
6560                 case RTE_FLOW_ITEM_TYPE_IPV6:
6561                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6562                         goto l3_ok;
6563                 case RTE_FLOW_ITEM_TYPE_ETH:
6564                         if (item->mask && item->spec) {
6565                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6566                                                             type, item,
6567                                                             l3_protocol);
6568                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6569                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6570                                         goto l3_ok;
6571                         }
6572                         break;
6573                 case RTE_FLOW_ITEM_TYPE_VLAN:
6574                         if (item->mask && item->spec) {
6575                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6576                                                             inner_type, item,
6577                                                             l3_protocol);
6578                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6579                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6580                                         goto l3_ok;
6581                         }
6582                         break;
6583                 }
6584         }
6585         return 0;
6586 l3_ok:
6587         *head = item;
6588         return l3_protocol;
6589 }
6590
6591 static uint8_t
6592 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6593                           const struct rte_flow_item *end)
6594 {
6595         const struct rte_flow_item *item = *head;
6596         uint8_t l4_protocol;
6597
6598         for (; item != end; item++) {
6599                 switch (item->type) {
6600                 default:
6601                         break;
6602                 case RTE_FLOW_ITEM_TYPE_TCP:
6603                         l4_protocol = IPPROTO_TCP;
6604                         goto l4_ok;
6605                 case RTE_FLOW_ITEM_TYPE_UDP:
6606                         l4_protocol = IPPROTO_UDP;
6607                         goto l4_ok;
6608                 case RTE_FLOW_ITEM_TYPE_IPV4:
6609                         if (item->mask && item->spec) {
6610                                 const struct rte_flow_item_ipv4 *mask, *spec;
6611
6612                                 mask = (typeof(mask))item->mask;
6613                                 spec = (typeof(spec))item->spec;
6614                                 l4_protocol = mask->hdr.next_proto_id &
6615                                               spec->hdr.next_proto_id;
6616                                 if (l4_protocol == IPPROTO_TCP ||
6617                                     l4_protocol == IPPROTO_UDP)
6618                                         goto l4_ok;
6619                         }
6620                         break;
6621                 case RTE_FLOW_ITEM_TYPE_IPV6:
6622                         if (item->mask && item->spec) {
6623                                 const struct rte_flow_item_ipv6 *mask, *spec;
6624                                 mask = (typeof(mask))item->mask;
6625                                 spec = (typeof(spec))item->spec;
6626                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6627                                 if (l4_protocol == IPPROTO_TCP ||
6628                                     l4_protocol == IPPROTO_UDP)
6629                                         goto l4_ok;
6630                         }
6631                         break;
6632                 }
6633         }
6634         return 0;
6635 l4_ok:
6636         *head = item;
6637         return l4_protocol;
6638 }
6639
6640 static int
6641 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6642                                 const struct rte_flow_item *rule_items,
6643                                 const struct rte_flow_item *integrity_item,
6644                                 struct rte_flow_error *error)
6645 {
6646         struct mlx5_priv *priv = dev->data->dev_private;
6647         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6648         const struct rte_flow_item_integrity *mask = (typeof(mask))
6649                                                      integrity_item->mask;
6650         const struct rte_flow_item_integrity *spec = (typeof(spec))
6651                                                      integrity_item->spec;
6652         uint32_t protocol;
6653
6654         if (!priv->config.hca_attr.pkt_integrity_match)
6655                 return rte_flow_error_set(error, ENOTSUP,
6656                                           RTE_FLOW_ERROR_TYPE_ITEM,
6657                                           integrity_item,
6658                                           "packet integrity integrity_item not supported");
6659         if (!mask)
6660                 mask = &rte_flow_item_integrity_mask;
6661         if (!mlx5_validate_integrity_item(mask))
6662                 return rte_flow_error_set(error, ENOTSUP,
6663                                           RTE_FLOW_ERROR_TYPE_ITEM,
6664                                           integrity_item,
6665                                           "unsupported integrity filter");
6666         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
6667         if (spec->level > 1) {
6668                 if (!tunnel_item)
6669                         return rte_flow_error_set(error, ENOTSUP,
6670                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6671                                                   integrity_item,
6672                                                   "missing tunnel item");
6673                 item = tunnel_item;
6674                 end_item = mlx5_find_end_item(tunnel_item);
6675         } else {
6676                 end_item = tunnel_item ? tunnel_item :
6677                            mlx5_find_end_item(integrity_item);
6678         }
6679         if (mask->l3_ok || mask->ipv4_csum_ok) {
6680                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6681                 if (!protocol)
6682                         return rte_flow_error_set(error, EINVAL,
6683                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6684                                                   integrity_item,
6685                                                   "missing L3 protocol");
6686         }
6687         if (mask->l4_ok || mask->l4_csum_ok) {
6688                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6689                 if (!protocol)
6690                         return rte_flow_error_set(error, EINVAL,
6691                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6692                                                   integrity_item,
6693                                                   "missing L4 protocol");
6694         }
6695         return 0;
6696 }
6697
6698 /**
6699  * Internal validation function. For validating both actions and items.
6700  *
6701  * @param[in] dev
6702  *   Pointer to the rte_eth_dev structure.
6703  * @param[in] attr
6704  *   Pointer to the flow attributes.
6705  * @param[in] items
6706  *   Pointer to the list of items.
6707  * @param[in] actions
6708  *   Pointer to the list of actions.
6709  * @param[in] external
6710  *   This flow rule is created by request external to PMD.
6711  * @param[in] hairpin
6712  *   Number of hairpin TX actions, 0 means classic flow.
6713  * @param[out] error
6714  *   Pointer to the error structure.
6715  *
6716  * @return
6717  *   0 on success, a negative errno value otherwise and rte_errno is set.
6718  */
6719 static int
6720 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6721                  const struct rte_flow_item items[],
6722                  const struct rte_flow_action actions[],
6723                  bool external, int hairpin, struct rte_flow_error *error)
6724 {
6725         int ret;
6726         uint64_t action_flags = 0;
6727         uint64_t item_flags = 0;
6728         uint64_t last_item = 0;
6729         uint8_t next_protocol = 0xff;
6730         uint16_t ether_type = 0;
6731         int actions_n = 0;
6732         uint8_t item_ipv6_proto = 0;
6733         int fdb_mirror_limit = 0;
6734         int modify_after_mirror = 0;
6735         const struct rte_flow_item *geneve_item = NULL;
6736         const struct rte_flow_item *gre_item = NULL;
6737         const struct rte_flow_item *gtp_item = NULL;
6738         const struct rte_flow_action_raw_decap *decap;
6739         const struct rte_flow_action_raw_encap *encap;
6740         const struct rte_flow_action_rss *rss = NULL;
6741         const struct rte_flow_action_rss *sample_rss = NULL;
6742         const struct rte_flow_action_count *sample_count = NULL;
6743         const struct rte_flow_item_tcp nic_tcp_mask = {
6744                 .hdr = {
6745                         .tcp_flags = 0xFF,
6746                         .src_port = RTE_BE16(UINT16_MAX),
6747                         .dst_port = RTE_BE16(UINT16_MAX),
6748                 }
6749         };
6750         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6751                 .hdr = {
6752                         .src_addr =
6753                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6754                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6755                         .dst_addr =
6756                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6757                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6758                         .vtc_flow = RTE_BE32(0xffffffff),
6759                         .proto = 0xff,
6760                         .hop_limits = 0xff,
6761                 },
6762                 .has_frag_ext = 1,
6763         };
6764         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6765                 .hdr = {
6766                         .common = {
6767                                 .u32 =
6768                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6769                                         .type = 0xFF,
6770                                         }).u32),
6771                         },
6772                         .dummy[0] = 0xffffffff,
6773                 },
6774         };
6775         struct mlx5_priv *priv = dev->data->dev_private;
6776         struct mlx5_dev_config *dev_conf = &priv->config;
6777         uint16_t queue_index = 0xFFFF;
6778         const struct rte_flow_item_vlan *vlan_m = NULL;
6779         uint32_t rw_act_num = 0;
6780         uint64_t is_root;
6781         const struct mlx5_flow_tunnel *tunnel;
6782         enum mlx5_tof_rule_type tof_rule_type;
6783         struct flow_grp_info grp_info = {
6784                 .external = !!external,
6785                 .transfer = !!attr->transfer,
6786                 .fdb_def_rule = !!priv->fdb_def_rule,
6787                 .std_tbl_fix = true,
6788         };
6789         const struct rte_eth_hairpin_conf *conf;
6790         const struct rte_flow_item *rule_items = items;
6791         const struct rte_flow_item *port_id_item = NULL;
6792         bool def_policy = false;
6793
6794         if (items == NULL)
6795                 return -1;
6796         tunnel = is_tunnel_offload_active(dev) ?
6797                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6798         if (tunnel) {
6799                 if (priv->representor)
6800                         return rte_flow_error_set
6801                                 (error, ENOTSUP,
6802                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6803                                  NULL, "decap not supported for VF representor");
6804                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6805                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6806                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6807                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6808                                         MLX5_FLOW_ACTION_DECAP;
6809                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6810                                         (dev, attr, tunnel, tof_rule_type);
6811         }
6812         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6813         if (ret < 0)
6814                 return ret;
6815         is_root = (uint64_t)ret;
6816         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6817                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6818                 int type = items->type;
6819
6820                 if (!mlx5_flow_os_item_supported(type))
6821                         return rte_flow_error_set(error, ENOTSUP,
6822                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6823                                                   NULL, "item not supported");
6824                 switch (type) {
6825                 case RTE_FLOW_ITEM_TYPE_VOID:
6826                         break;
6827                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6828                         ret = flow_dv_validate_item_port_id
6829                                         (dev, items, attr, item_flags, error);
6830                         if (ret < 0)
6831                                 return ret;
6832                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6833                         port_id_item = items;
6834                         break;
6835                 case RTE_FLOW_ITEM_TYPE_ETH:
6836                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6837                                                           true, error);
6838                         if (ret < 0)
6839                                 return ret;
6840                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6841                                              MLX5_FLOW_LAYER_OUTER_L2;
6842                         if (items->mask != NULL && items->spec != NULL) {
6843                                 ether_type =
6844                                         ((const struct rte_flow_item_eth *)
6845                                          items->spec)->type;
6846                                 ether_type &=
6847                                         ((const struct rte_flow_item_eth *)
6848                                          items->mask)->type;
6849                                 ether_type = rte_be_to_cpu_16(ether_type);
6850                         } else {
6851                                 ether_type = 0;
6852                         }
6853                         break;
6854                 case RTE_FLOW_ITEM_TYPE_VLAN:
6855                         ret = flow_dv_validate_item_vlan(items, item_flags,
6856                                                          dev, error);
6857                         if (ret < 0)
6858                                 return ret;
6859                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6860                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6861                         if (items->mask != NULL && items->spec != NULL) {
6862                                 ether_type =
6863                                         ((const struct rte_flow_item_vlan *)
6864                                          items->spec)->inner_type;
6865                                 ether_type &=
6866                                         ((const struct rte_flow_item_vlan *)
6867                                          items->mask)->inner_type;
6868                                 ether_type = rte_be_to_cpu_16(ether_type);
6869                         } else {
6870                                 ether_type = 0;
6871                         }
6872                         /* Store outer VLAN mask for of_push_vlan action. */
6873                         if (!tunnel)
6874                                 vlan_m = items->mask;
6875                         break;
6876                 case RTE_FLOW_ITEM_TYPE_IPV4:
6877                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6878                                                   &item_flags, &tunnel);
6879                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6880                                                          last_item, ether_type,
6881                                                          error);
6882                         if (ret < 0)
6883                                 return ret;
6884                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6885                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6886                         if (items->mask != NULL &&
6887                             ((const struct rte_flow_item_ipv4 *)
6888                              items->mask)->hdr.next_proto_id) {
6889                                 next_protocol =
6890                                         ((const struct rte_flow_item_ipv4 *)
6891                                          (items->spec))->hdr.next_proto_id;
6892                                 next_protocol &=
6893                                         ((const struct rte_flow_item_ipv4 *)
6894                                          (items->mask))->hdr.next_proto_id;
6895                         } else {
6896                                 /* Reset for inner layer. */
6897                                 next_protocol = 0xff;
6898                         }
6899                         break;
6900                 case RTE_FLOW_ITEM_TYPE_IPV6:
6901                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6902                                                   &item_flags, &tunnel);
6903                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6904                                                            last_item,
6905                                                            ether_type,
6906                                                            &nic_ipv6_mask,
6907                                                            error);
6908                         if (ret < 0)
6909                                 return ret;
6910                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6911                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6912                         if (items->mask != NULL &&
6913                             ((const struct rte_flow_item_ipv6 *)
6914                              items->mask)->hdr.proto) {
6915                                 item_ipv6_proto =
6916                                         ((const struct rte_flow_item_ipv6 *)
6917                                          items->spec)->hdr.proto;
6918                                 next_protocol =
6919                                         ((const struct rte_flow_item_ipv6 *)
6920                                          items->spec)->hdr.proto;
6921                                 next_protocol &=
6922                                         ((const struct rte_flow_item_ipv6 *)
6923                                          items->mask)->hdr.proto;
6924                         } else {
6925                                 /* Reset for inner layer. */
6926                                 next_protocol = 0xff;
6927                         }
6928                         break;
6929                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6930                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6931                                                                   item_flags,
6932                                                                   error);
6933                         if (ret < 0)
6934                                 return ret;
6935                         last_item = tunnel ?
6936                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6937                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6938                         if (items->mask != NULL &&
6939                             ((const struct rte_flow_item_ipv6_frag_ext *)
6940                              items->mask)->hdr.next_header) {
6941                                 next_protocol =
6942                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6943                                  items->spec)->hdr.next_header;
6944                                 next_protocol &=
6945                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6946                                  items->mask)->hdr.next_header;
6947                         } else {
6948                                 /* Reset for inner layer. */
6949                                 next_protocol = 0xff;
6950                         }
6951                         break;
6952                 case RTE_FLOW_ITEM_TYPE_TCP:
6953                         ret = mlx5_flow_validate_item_tcp
6954                                                 (items, item_flags,
6955                                                  next_protocol,
6956                                                  &nic_tcp_mask,
6957                                                  error);
6958                         if (ret < 0)
6959                                 return ret;
6960                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6961                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6962                         break;
6963                 case RTE_FLOW_ITEM_TYPE_UDP:
6964                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6965                                                           next_protocol,
6966                                                           error);
6967                         if (ret < 0)
6968                                 return ret;
6969                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6970                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6971                         break;
6972                 case RTE_FLOW_ITEM_TYPE_GRE:
6973                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6974                                                           next_protocol, error);
6975                         if (ret < 0)
6976                                 return ret;
6977                         gre_item = items;
6978                         last_item = MLX5_FLOW_LAYER_GRE;
6979                         break;
6980                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6981                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6982                                                             next_protocol,
6983                                                             error);
6984                         if (ret < 0)
6985                                 return ret;
6986                         last_item = MLX5_FLOW_LAYER_NVGRE;
6987                         break;
6988                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6989                         ret = mlx5_flow_validate_item_gre_key
6990                                 (items, item_flags, gre_item, error);
6991                         if (ret < 0)
6992                                 return ret;
6993                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6994                         break;
6995                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6996                         ret = mlx5_flow_validate_item_vxlan(dev, items,
6997                                                             item_flags, attr,
6998                                                             error);
6999                         if (ret < 0)
7000                                 return ret;
7001                         last_item = MLX5_FLOW_LAYER_VXLAN;
7002                         break;
7003                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7004                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7005                                                                 item_flags, dev,
7006                                                                 error);
7007                         if (ret < 0)
7008                                 return ret;
7009                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7010                         break;
7011                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7012                         ret = mlx5_flow_validate_item_geneve(items,
7013                                                              item_flags, dev,
7014                                                              error);
7015                         if (ret < 0)
7016                                 return ret;
7017                         geneve_item = items;
7018                         last_item = MLX5_FLOW_LAYER_GENEVE;
7019                         break;
7020                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7021                         ret = mlx5_flow_validate_item_geneve_opt(items,
7022                                                                  last_item,
7023                                                                  geneve_item,
7024                                                                  dev,
7025                                                                  error);
7026                         if (ret < 0)
7027                                 return ret;
7028                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7029                         break;
7030                 case RTE_FLOW_ITEM_TYPE_MPLS:
7031                         ret = mlx5_flow_validate_item_mpls(dev, items,
7032                                                            item_flags,
7033                                                            last_item, error);
7034                         if (ret < 0)
7035                                 return ret;
7036                         last_item = MLX5_FLOW_LAYER_MPLS;
7037                         break;
7038
7039                 case RTE_FLOW_ITEM_TYPE_MARK:
7040                         ret = flow_dv_validate_item_mark(dev, items, attr,
7041                                                          error);
7042                         if (ret < 0)
7043                                 return ret;
7044                         last_item = MLX5_FLOW_ITEM_MARK;
7045                         break;
7046                 case RTE_FLOW_ITEM_TYPE_META:
7047                         ret = flow_dv_validate_item_meta(dev, items, attr,
7048                                                          error);
7049                         if (ret < 0)
7050                                 return ret;
7051                         last_item = MLX5_FLOW_ITEM_METADATA;
7052                         break;
7053                 case RTE_FLOW_ITEM_TYPE_ICMP:
7054                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7055                                                            next_protocol,
7056                                                            error);
7057                         if (ret < 0)
7058                                 return ret;
7059                         last_item = MLX5_FLOW_LAYER_ICMP;
7060                         break;
7061                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7062                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7063                                                             next_protocol,
7064                                                             error);
7065                         if (ret < 0)
7066                                 return ret;
7067                         item_ipv6_proto = IPPROTO_ICMPV6;
7068                         last_item = MLX5_FLOW_LAYER_ICMP6;
7069                         break;
7070                 case RTE_FLOW_ITEM_TYPE_TAG:
7071                         ret = flow_dv_validate_item_tag(dev, items,
7072                                                         attr, error);
7073                         if (ret < 0)
7074                                 return ret;
7075                         last_item = MLX5_FLOW_ITEM_TAG;
7076                         break;
7077                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7078                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7079                         break;
7080                 case RTE_FLOW_ITEM_TYPE_GTP:
7081                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7082                                                         error);
7083                         if (ret < 0)
7084                                 return ret;
7085                         gtp_item = items;
7086                         last_item = MLX5_FLOW_LAYER_GTP;
7087                         break;
7088                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7089                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7090                                                             gtp_item, attr,
7091                                                             error);
7092                         if (ret < 0)
7093                                 return ret;
7094                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7095                         break;
7096                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7097                         /* Capacity will be checked in the translate stage. */
7098                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7099                                                             last_item,
7100                                                             ether_type,
7101                                                             &nic_ecpri_mask,
7102                                                             error);
7103                         if (ret < 0)
7104                                 return ret;
7105                         last_item = MLX5_FLOW_LAYER_ECPRI;
7106                         break;
7107                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7108                         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
7109                                 return rte_flow_error_set
7110                                         (error, ENOTSUP,
7111                                          RTE_FLOW_ERROR_TYPE_ITEM,
7112                                          NULL, "multiple integrity items not supported");
7113                         ret = flow_dv_validate_item_integrity(dev, rule_items,
7114                                                               items, error);
7115                         if (ret < 0)
7116                                 return ret;
7117                         last_item = MLX5_FLOW_ITEM_INTEGRITY;
7118                         break;
7119                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7120                         ret = flow_dv_validate_item_aso_ct(dev, items,
7121                                                            &item_flags, error);
7122                         if (ret < 0)
7123                                 return ret;
7124                         break;
7125                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7126                         /* tunnel offload item was processed before
7127                          * list it here as a supported type
7128                          */
7129                         break;
7130                 default:
7131                         return rte_flow_error_set(error, ENOTSUP,
7132                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7133                                                   NULL, "item not supported");
7134                 }
7135                 item_flags |= last_item;
7136         }
7137         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7138                 int type = actions->type;
7139                 bool shared_count = false;
7140
7141                 if (!mlx5_flow_os_action_supported(type))
7142                         return rte_flow_error_set(error, ENOTSUP,
7143                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7144                                                   actions,
7145                                                   "action not supported");
7146                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7147                         return rte_flow_error_set(error, ENOTSUP,
7148                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7149                                                   actions, "too many actions");
7150                 if (action_flags &
7151                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7152                         return rte_flow_error_set(error, ENOTSUP,
7153                                 RTE_FLOW_ERROR_TYPE_ACTION,
7154                                 NULL, "meter action with policy "
7155                                 "must be the last action");
7156                 switch (type) {
7157                 case RTE_FLOW_ACTION_TYPE_VOID:
7158                         break;
7159                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7160                         ret = flow_dv_validate_action_port_id(dev,
7161                                                               action_flags,
7162                                                               actions,
7163                                                               attr,
7164                                                               error);
7165                         if (ret)
7166                                 return ret;
7167                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7168                         ++actions_n;
7169                         break;
7170                 case RTE_FLOW_ACTION_TYPE_FLAG:
7171                         ret = flow_dv_validate_action_flag(dev, action_flags,
7172                                                            attr, error);
7173                         if (ret < 0)
7174                                 return ret;
7175                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7176                                 /* Count all modify-header actions as one. */
7177                                 if (!(action_flags &
7178                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7179                                         ++actions_n;
7180                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7181                                                 MLX5_FLOW_ACTION_MARK_EXT;
7182                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7183                                         modify_after_mirror = 1;
7184
7185                         } else {
7186                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7187                                 ++actions_n;
7188                         }
7189                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7190                         break;
7191                 case RTE_FLOW_ACTION_TYPE_MARK:
7192                         ret = flow_dv_validate_action_mark(dev, actions,
7193                                                            action_flags,
7194                                                            attr, error);
7195                         if (ret < 0)
7196                                 return ret;
7197                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7198                                 /* Count all modify-header actions as one. */
7199                                 if (!(action_flags &
7200                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7201                                         ++actions_n;
7202                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7203                                                 MLX5_FLOW_ACTION_MARK_EXT;
7204                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7205                                         modify_after_mirror = 1;
7206                         } else {
7207                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7208                                 ++actions_n;
7209                         }
7210                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7211                         break;
7212                 case RTE_FLOW_ACTION_TYPE_SET_META:
7213                         ret = flow_dv_validate_action_set_meta(dev, actions,
7214                                                                action_flags,
7215                                                                attr, error);
7216                         if (ret < 0)
7217                                 return ret;
7218                         /* Count all modify-header actions as one action. */
7219                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7220                                 ++actions_n;
7221                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7222                                 modify_after_mirror = 1;
7223                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7224                         rw_act_num += MLX5_ACT_NUM_SET_META;
7225                         break;
7226                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7227                         ret = flow_dv_validate_action_set_tag(dev, actions,
7228                                                               action_flags,
7229                                                               attr, error);
7230                         if (ret < 0)
7231                                 return ret;
7232                         /* Count all modify-header actions as one action. */
7233                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7234                                 ++actions_n;
7235                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7236                                 modify_after_mirror = 1;
7237                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7238                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7239                         break;
7240                 case RTE_FLOW_ACTION_TYPE_DROP:
7241                         ret = mlx5_flow_validate_action_drop(action_flags,
7242                                                              attr, error);
7243                         if (ret < 0)
7244                                 return ret;
7245                         action_flags |= MLX5_FLOW_ACTION_DROP;
7246                         ++actions_n;
7247                         break;
7248                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7249                         ret = mlx5_flow_validate_action_queue(actions,
7250                                                               action_flags, dev,
7251                                                               attr, error);
7252                         if (ret < 0)
7253                                 return ret;
7254                         queue_index = ((const struct rte_flow_action_queue *)
7255                                                         (actions->conf))->index;
7256                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7257                         ++actions_n;
7258                         break;
7259                 case RTE_FLOW_ACTION_TYPE_RSS:
7260                         rss = actions->conf;
7261                         ret = mlx5_flow_validate_action_rss(actions,
7262                                                             action_flags, dev,
7263                                                             attr, item_flags,
7264                                                             error);
7265                         if (ret < 0)
7266                                 return ret;
7267                         if (rss && sample_rss &&
7268                             (sample_rss->level != rss->level ||
7269                             sample_rss->types != rss->types))
7270                                 return rte_flow_error_set(error, ENOTSUP,
7271                                         RTE_FLOW_ERROR_TYPE_ACTION,
7272                                         NULL,
7273                                         "Can't use the different RSS types "
7274                                         "or level in the same flow");
7275                         if (rss != NULL && rss->queue_num)
7276                                 queue_index = rss->queue[0];
7277                         action_flags |= MLX5_FLOW_ACTION_RSS;
7278                         ++actions_n;
7279                         break;
7280                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7281                         ret =
7282                         mlx5_flow_validate_action_default_miss(action_flags,
7283                                         attr, error);
7284                         if (ret < 0)
7285                                 return ret;
7286                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7287                         ++actions_n;
7288                         break;
7289                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7290                 case RTE_FLOW_ACTION_TYPE_COUNT:
7291                         shared_count = is_shared_action_count(actions);
7292                         ret = flow_dv_validate_action_count(dev, shared_count,
7293                                                             action_flags,
7294                                                             error);
7295                         if (ret < 0)
7296                                 return ret;
7297                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7298                         ++actions_n;
7299                         break;
7300                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7301                         if (flow_dv_validate_action_pop_vlan(dev,
7302                                                              action_flags,
7303                                                              actions,
7304                                                              item_flags, attr,
7305                                                              error))
7306                                 return -rte_errno;
7307                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7308                                 modify_after_mirror = 1;
7309                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7310                         ++actions_n;
7311                         break;
7312                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7313                         ret = flow_dv_validate_action_push_vlan(dev,
7314                                                                 action_flags,
7315                                                                 vlan_m,
7316                                                                 actions, attr,
7317                                                                 error);
7318                         if (ret < 0)
7319                                 return ret;
7320                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7321                                 modify_after_mirror = 1;
7322                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7323                         ++actions_n;
7324                         break;
7325                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7326                         ret = flow_dv_validate_action_set_vlan_pcp
7327                                                 (action_flags, actions, error);
7328                         if (ret < 0)
7329                                 return ret;
7330                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7331                                 modify_after_mirror = 1;
7332                         /* Count PCP with push_vlan command. */
7333                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7334                         break;
7335                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7336                         ret = flow_dv_validate_action_set_vlan_vid
7337                                                 (item_flags, action_flags,
7338                                                  actions, error);
7339                         if (ret < 0)
7340                                 return ret;
7341                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7342                                 modify_after_mirror = 1;
7343                         /* Count VID with push_vlan command. */
7344                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7345                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7346                         break;
7347                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7348                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7349                         ret = flow_dv_validate_action_l2_encap(dev,
7350                                                                action_flags,
7351                                                                actions, attr,
7352                                                                error);
7353                         if (ret < 0)
7354                                 return ret;
7355                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7356                         ++actions_n;
7357                         break;
7358                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7359                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7360                         ret = flow_dv_validate_action_decap(dev, action_flags,
7361                                                             actions, item_flags,
7362                                                             attr, error);
7363                         if (ret < 0)
7364                                 return ret;
7365                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7366                                 modify_after_mirror = 1;
7367                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7368                         ++actions_n;
7369                         break;
7370                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7371                         ret = flow_dv_validate_action_raw_encap_decap
7372                                 (dev, NULL, actions->conf, attr, &action_flags,
7373                                  &actions_n, actions, item_flags, error);
7374                         if (ret < 0)
7375                                 return ret;
7376                         break;
7377                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7378                         decap = actions->conf;
7379                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7380                                 ;
7381                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7382                                 encap = NULL;
7383                                 actions--;
7384                         } else {
7385                                 encap = actions->conf;
7386                         }
7387                         ret = flow_dv_validate_action_raw_encap_decap
7388                                            (dev,
7389                                             decap ? decap : &empty_decap, encap,
7390                                             attr, &action_flags, &actions_n,
7391                                             actions, item_flags, error);
7392                         if (ret < 0)
7393                                 return ret;
7394                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7395                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7396                                 modify_after_mirror = 1;
7397                         break;
7398                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7399                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7400                         ret = flow_dv_validate_action_modify_mac(action_flags,
7401                                                                  actions,
7402                                                                  item_flags,
7403                                                                  error);
7404                         if (ret < 0)
7405                                 return ret;
7406                         /* Count all modify-header actions as one action. */
7407                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7408                                 ++actions_n;
7409                         action_flags |= actions->type ==
7410                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7411                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7412                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7413                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7414                                 modify_after_mirror = 1;
7415                         /*
7416                          * Even if the source and destination MAC addresses have
7417                          * overlap in the header with 4B alignment, the convert
7418                          * function will handle them separately and 4 SW actions
7419                          * will be created. And 2 actions will be added each
7420                          * time no matter how many bytes of address will be set.
7421                          */
7422                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7423                         break;
7424                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7425                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7426                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7427                                                                   actions,
7428                                                                   item_flags,
7429                                                                   error);
7430                         if (ret < 0)
7431                                 return ret;
7432                         /* Count all modify-header actions as one action. */
7433                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7434                                 ++actions_n;
7435                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7436                                 modify_after_mirror = 1;
7437                         action_flags |= actions->type ==
7438                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7439                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7440                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7441                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7442                         break;
7443                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7444                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7445                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7446                                                                   actions,
7447                                                                   item_flags,
7448                                                                   error);
7449                         if (ret < 0)
7450                                 return ret;
7451                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7452                                 return rte_flow_error_set(error, ENOTSUP,
7453                                         RTE_FLOW_ERROR_TYPE_ACTION,
7454                                         actions,
7455                                         "Can't change header "
7456                                         "with ICMPv6 proto");
7457                         /* Count all modify-header actions as one action. */
7458                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7459                                 ++actions_n;
7460                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7461                                 modify_after_mirror = 1;
7462                         action_flags |= actions->type ==
7463                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7464                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7465                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7466                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7467                         break;
7468                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7469                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7470                         ret = flow_dv_validate_action_modify_tp(action_flags,
7471                                                                 actions,
7472                                                                 item_flags,
7473                                                                 error);
7474                         if (ret < 0)
7475                                 return ret;
7476                         /* Count all modify-header actions as one action. */
7477                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7478                                 ++actions_n;
7479                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7480                                 modify_after_mirror = 1;
7481                         action_flags |= actions->type ==
7482                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7483                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7484                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7485                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7486                         break;
7487                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7488                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7489                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7490                                                                  actions,
7491                                                                  item_flags,
7492                                                                  error);
7493                         if (ret < 0)
7494                                 return ret;
7495                         /* Count all modify-header actions as one action. */
7496                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7497                                 ++actions_n;
7498                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7499                                 modify_after_mirror = 1;
7500                         action_flags |= actions->type ==
7501                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7502                                                 MLX5_FLOW_ACTION_SET_TTL :
7503                                                 MLX5_FLOW_ACTION_DEC_TTL;
7504                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7505                         break;
7506                 case RTE_FLOW_ACTION_TYPE_JUMP:
7507                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7508                                                            action_flags,
7509                                                            attr, external,
7510                                                            error);
7511                         if (ret)
7512                                 return ret;
7513                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7514                             fdb_mirror_limit)
7515                                 return rte_flow_error_set(error, EINVAL,
7516                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7517                                                   NULL,
7518                                                   "sample and jump action combination is not supported");
7519                         ++actions_n;
7520                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7521                         break;
7522                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7523                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7524                         ret = flow_dv_validate_action_modify_tcp_seq
7525                                                                 (action_flags,
7526                                                                  actions,
7527                                                                  item_flags,
7528                                                                  error);
7529                         if (ret < 0)
7530                                 return ret;
7531                         /* Count all modify-header actions as one action. */
7532                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7533                                 ++actions_n;
7534                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7535                                 modify_after_mirror = 1;
7536                         action_flags |= actions->type ==
7537                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7538                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7539                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7540                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7541                         break;
7542                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7543                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7544                         ret = flow_dv_validate_action_modify_tcp_ack
7545                                                                 (action_flags,
7546                                                                  actions,
7547                                                                  item_flags,
7548                                                                  error);
7549                         if (ret < 0)
7550                                 return ret;
7551                         /* Count all modify-header actions as one action. */
7552                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7553                                 ++actions_n;
7554                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7555                                 modify_after_mirror = 1;
7556                         action_flags |= actions->type ==
7557                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7558                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7559                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7560                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7561                         break;
7562                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7563                         break;
7564                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7565                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7566                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7567                         break;
7568                 case RTE_FLOW_ACTION_TYPE_METER:
7569                         ret = mlx5_flow_validate_action_meter(dev,
7570                                                               action_flags,
7571                                                               actions, attr,
7572                                                               port_id_item,
7573                                                               &def_policy,
7574                                                               error);
7575                         if (ret < 0)
7576                                 return ret;
7577                         action_flags |= MLX5_FLOW_ACTION_METER;
7578                         if (!def_policy)
7579                                 action_flags |=
7580                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7581                         ++actions_n;
7582                         /* Meter action will add one more TAG action. */
7583                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7584                         break;
7585                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7586                         if (!attr->transfer && !attr->group)
7587                                 return rte_flow_error_set(error, ENOTSUP,
7588                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7589                                                                            NULL,
7590                           "Shared ASO age action is not supported for group 0");
7591                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7592                                 return rte_flow_error_set
7593                                                   (error, EINVAL,
7594                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7595                                                    NULL,
7596                                                    "duplicate age actions set");
7597                         action_flags |= MLX5_FLOW_ACTION_AGE;
7598                         ++actions_n;
7599                         break;
7600                 case RTE_FLOW_ACTION_TYPE_AGE:
7601                         ret = flow_dv_validate_action_age(action_flags,
7602                                                           actions, dev,
7603                                                           error);
7604                         if (ret < 0)
7605                                 return ret;
7606                         /*
7607                          * Validate the regular AGE action (using counter)
7608                          * mutual exclusion with shared count actions.
7609                          */
7610                         if (!priv->sh->flow_hit_aso_en) {
7611                                 if (shared_count)
7612                                         return rte_flow_error_set
7613                                                 (error, EINVAL,
7614                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7615                                                 NULL,
7616                                                 "old age and shared count combination is not supported");
7617                                 if (sample_count)
7618                                         return rte_flow_error_set
7619                                                 (error, EINVAL,
7620                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7621                                                 NULL,
7622                                                 "old age action and count must be in the same sub flow");
7623                         }
7624                         action_flags |= MLX5_FLOW_ACTION_AGE;
7625                         ++actions_n;
7626                         break;
7627                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7628                         ret = flow_dv_validate_action_modify_ipv4_dscp
7629                                                          (action_flags,
7630                                                           actions,
7631                                                           item_flags,
7632                                                           error);
7633                         if (ret < 0)
7634                                 return ret;
7635                         /* Count all modify-header actions as one action. */
7636                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7637                                 ++actions_n;
7638                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7639                                 modify_after_mirror = 1;
7640                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7641                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7642                         break;
7643                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7644                         ret = flow_dv_validate_action_modify_ipv6_dscp
7645                                                                 (action_flags,
7646                                                                  actions,
7647                                                                  item_flags,
7648                                                                  error);
7649                         if (ret < 0)
7650                                 return ret;
7651                         /* Count all modify-header actions as one action. */
7652                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7653                                 ++actions_n;
7654                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7655                                 modify_after_mirror = 1;
7656                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7657                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7658                         break;
7659                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7660                         ret = flow_dv_validate_action_sample(&action_flags,
7661                                                              actions, dev,
7662                                                              attr, item_flags,
7663                                                              rss, &sample_rss,
7664                                                              &sample_count,
7665                                                              &fdb_mirror_limit,
7666                                                              error);
7667                         if (ret < 0)
7668                                 return ret;
7669                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7670                         ++actions_n;
7671                         break;
7672                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7673                         ret = flow_dv_validate_action_modify_field(dev,
7674                                                                    action_flags,
7675                                                                    actions,
7676                                                                    attr,
7677                                                                    error);
7678                         if (ret < 0)
7679                                 return ret;
7680                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7681                                 modify_after_mirror = 1;
7682                         /* Count all modify-header actions as one action. */
7683                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7684                                 ++actions_n;
7685                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7686                         rw_act_num += ret;
7687                         break;
7688                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7689                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7690                                                              item_flags, attr,
7691                                                              error);
7692                         if (ret < 0)
7693                                 return ret;
7694                         action_flags |= MLX5_FLOW_ACTION_CT;
7695                         break;
7696                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7697                         /* Tunnel offload action was processed earlier;
7698                          * list it here only as a supported action type.
7699                          */
7700                         break;
7701                 default:
7702                         return rte_flow_error_set(error, ENOTSUP,
7703                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7704                                                   actions,
7705                                                   "action not supported");
7706                 }
7707         }
7708         /*
7709          * Validate actions in flow rules
7710          * - Explicit decap action is prohibited by the tunnel offload API.
7711          * - Drop action in tunnel steer rule is prohibited by the API.
7712          * - Application cannot use MARK action because its value can mask
7713          *   tunnel default miss notification.
7714          * - JUMP in tunnel match rule has no support in current PMD
7715          *   implementation.
7716          * - TAG & META are reserved for future uses.
7717          */
7718         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7719                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7720                                             MLX5_FLOW_ACTION_MARK     |
7721                                             MLX5_FLOW_ACTION_SET_TAG  |
7722                                             MLX5_FLOW_ACTION_SET_META |
7723                                             MLX5_FLOW_ACTION_DROP;
7724
7725                 if (action_flags & bad_actions_mask)
7726                         return rte_flow_error_set
7727                                         (error, EINVAL,
7728                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7729                                         "Invalid RTE action in tunnel "
7730                                         "set decap rule");
7731                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7732                         return rte_flow_error_set
7733                                         (error, EINVAL,
7734                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7735                                         "tunnel set decap rule must terminate "
7736                                         "with JUMP");
7737                 if (!attr->ingress)
7738                         return rte_flow_error_set
7739                                         (error, EINVAL,
7740                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7741                                         "tunnel flows for ingress traffic only");
7742         }
7743         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7744                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7745                                             MLX5_FLOW_ACTION_MARK    |
7746                                             MLX5_FLOW_ACTION_SET_TAG |
7747                                             MLX5_FLOW_ACTION_SET_META;
7748
7749                 if (action_flags & bad_actions_mask)
7750                         return rte_flow_error_set
7751                                         (error, EINVAL,
7752                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7753                                         "Invalid RTE action in tunnel "
7754                                         "set match rule");
7755         }
7756         /*
7757          * Validate the drop action mutual exclusion with other actions.
7758          * Drop action is mutually-exclusive with any other action, except for
7759          * Count action.
7760          * Drop action compatibility with tunnel offload was already validated.
7760          * FIXME(review): the mask in the "if" below ORs
7760          * MLX5_FLOW_ACTION_TUNNEL_MATCH with itself; the second operand was
7760          * presumably meant to be MLX5_FLOW_ACTION_TUNNEL_SET so that both
7760          * tunnel-offload rule kinds skip the drop-exclusivity check -
7760          * confirm intent and fix the duplicated flag.
7761          */
7762         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7763                             MLX5_FLOW_ACTION_TUNNEL_MATCH));
7764         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7765             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7766                 return rte_flow_error_set(error, EINVAL,
7767                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7768                                           "Drop action is mutually-exclusive "
7769                                           "with any other action, except for "
7770                                           "Count action");
7771         /* Eswitch has few restrictions on using items and actions */
7772         if (attr->transfer) {
7773                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7774                     action_flags & MLX5_FLOW_ACTION_FLAG)
7775                         return rte_flow_error_set(error, ENOTSUP,
7776                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7777                                                   NULL,
7778                                                   "unsupported action FLAG");
7779                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7780                     action_flags & MLX5_FLOW_ACTION_MARK)
7781                         return rte_flow_error_set(error, ENOTSUP,
7782                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7783                                                   NULL,
7784                                                   "unsupported action MARK");
7785                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7786                         return rte_flow_error_set(error, ENOTSUP,
7787                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7788                                                   NULL,
7789                                                   "unsupported action QUEUE");
7790                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7791                         return rte_flow_error_set(error, ENOTSUP,
7792                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7793                                                   NULL,
7794                                                   "unsupported action RSS");
7795                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7796                         return rte_flow_error_set(error, EINVAL,
7797                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7798                                                   actions,
7799                                                   "no fate action is found");
7800         } else {
7801                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7802                         return rte_flow_error_set(error, EINVAL,
7803                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7804                                                   actions,
7805                                                   "no fate action is found");
7806         }
7807         /*
7808          * Continue validation for Xcap and VLAN actions.
7809          * If hairpin is working in explicit TX rule mode, there is no actions
7810          * splitting and the validation of hairpin ingress flow should be the
7811          * same as other standard flows.
7812          */
7813         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7814                              MLX5_FLOW_VLAN_ACTIONS)) &&
7815             (queue_index == 0xFFFF ||
7816              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7817              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7818              conf->tx_explicit != 0))) {
7819                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7820                     MLX5_FLOW_XCAP_ACTIONS)
7821                         return rte_flow_error_set(error, ENOTSUP,
7822                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7823                                                   NULL, "encap and decap "
7824                                                   "combination aren't supported");
7825                 if (!attr->transfer && attr->ingress) {
7826                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7827                                 return rte_flow_error_set
7828                                                 (error, ENOTSUP,
7829                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7830                                                  NULL, "encap is not supported"
7831                                                  " for ingress traffic");
7832                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7833                                 return rte_flow_error_set
7834                                                 (error, ENOTSUP,
7835                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7836                                                  NULL, "push VLAN action not "
7837                                                  "supported for ingress");
7838                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7839                                         MLX5_FLOW_VLAN_ACTIONS)
7840                                 return rte_flow_error_set
7841                                                 (error, ENOTSUP,
7842                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7843                                                  NULL, "no support for "
7844                                                  "multiple VLAN actions");
7845                 }
7846         }
7847         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7848                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7849                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7850                         attr->ingress)
7851                         return rte_flow_error_set
7852                                 (error, ENOTSUP,
7853                                 RTE_FLOW_ERROR_TYPE_ACTION,
7854                                 NULL, "fate action not supported for "
7855                                 "meter with policy");
7856                 if (attr->egress) {
7857                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7858                                 return rte_flow_error_set
7859                                         (error, ENOTSUP,
7860                                         RTE_FLOW_ERROR_TYPE_ACTION,
7861                                         NULL, "modify header action in egress "
7862                                         "cannot be done before meter action");
7863                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7864                                 return rte_flow_error_set
7865                                         (error, ENOTSUP,
7866                                         RTE_FLOW_ERROR_TYPE_ACTION,
7867                                         NULL, "encap action in egress "
7868                                         "cannot be done before meter action");
7869                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7870                                 return rte_flow_error_set
7871                                         (error, ENOTSUP,
7872                                         RTE_FLOW_ERROR_TYPE_ACTION,
7873                                         NULL, "push vlan action in egress "
7874                                         "cannot be done before meter action");
7875                 }
7876         }
7877         /*
7878          * Hairpin flow will add one more TAG action in TX implicit mode.
7879          * In TX explicit mode, there will be no hairpin flow ID.
7880          */
7881         if (hairpin > 0)
7882                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7883         /* extra metadata enabled: one more TAG action will be add. */
7884         if (dev_conf->dv_flow_en &&
7885             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7886             mlx5_flow_ext_mreg_supported(dev))
7887                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7888         if (rw_act_num >
7889                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7890                 return rte_flow_error_set(error, ENOTSUP,
7891                                           RTE_FLOW_ERROR_TYPE_ACTION,
7892                                           NULL, "too many header modify"
7893                                           " actions to support");
7894         }
7895         /* Eswitch egress mirror and modify flow has limitation on CX5 */
7896         if (fdb_mirror_limit && modify_after_mirror)
7897                 return rte_flow_error_set(error, EINVAL,
7898                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7899                                 "sample before modify action is not supported");
7900         return 0;
7901 }
7902
7903 /**
7904  * Internal preparation function. Allocates the DV flow size,
7905  * this size is constant.
7906  *
7907  * @param[in] dev
7908  *   Pointer to the rte_eth_dev structure.
7909  * @param[in] attr
7910  *   Pointer to the flow attributes.
7911  * @param[in] items
7912  *   Pointer to the list of items.
7913  * @param[in] actions
7914  *   Pointer to the list of actions.
7915  * @param[out] error
7916  *   Pointer to the error structure.
7917  *
7918  * @return
7919  *   Pointer to mlx5_flow object on success,
7920  *   otherwise NULL and rte_errno is set.
7921  */
7922 static struct mlx5_flow *
7923 flow_dv_prepare(struct rte_eth_dev *dev,
7924                 const struct rte_flow_attr *attr __rte_unused,
7925                 const struct rte_flow_item items[] __rte_unused,
7926                 const struct rte_flow_action actions[] __rte_unused,
7927                 struct rte_flow_error *error)
7928 {
7929         uint32_t handle_idx = 0;
7930         struct mlx5_flow *dev_flow;
7931         struct mlx5_flow_handle *dev_handle;
7932         struct mlx5_priv *priv = dev->data->dev_private;
7933         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7934
7935         MLX5_ASSERT(wks);
7936         wks->skip_matcher_reg = 0;
7937         wks->policy = NULL;
7938         wks->final_policy = NULL;
7939         /* In case of corrupting the memory. */
7940         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7941                 rte_flow_error_set(error, ENOSPC,
7942                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7943                                    "not free temporary device flow");
7944                 return NULL;
7945         }
7946         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7947                                    &handle_idx);
7948         if (!dev_handle) {
7949                 rte_flow_error_set(error, ENOMEM,
7950                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7951                                    "not enough memory to create flow handle");
7952                 return NULL;
7953         }
7954         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7955         dev_flow = &wks->flows[wks->flow_idx++];
7956         memset(dev_flow, 0, sizeof(*dev_flow));
7957         dev_flow->handle = dev_handle;
7958         dev_flow->handle_idx = handle_idx;
7959         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
7960         dev_flow->ingress = attr->ingress;
7961         dev_flow->dv.transfer = attr->transfer;
7962         return dev_flow;
7963 }
7964
7965 #ifdef RTE_LIBRTE_MLX5_DEBUG
7966 /**
7967  * Sanity check for match mask and value. Similar to check_valid_spec() in
7968  * kernel driver. If unmasked bit is present in value, it returns failure.
7969  *
7970  * @param match_mask
7971  *   pointer to match mask buffer.
7972  * @param match_value
7973  *   pointer to match value buffer.
7974  *
7975  * @return
7976  *   0 if valid, -EINVAL otherwise.
7977  */
7978 static int
7979 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7980 {
7981         uint8_t *m = match_mask;
7982         uint8_t *v = match_value;
7983         unsigned int i;
7984
7985         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7986                 if (v[i] & ~m[i]) {
7987                         DRV_LOG(ERR,
7988                                 "match_value differs from match_criteria"
7989                                 " %p[%u] != %p[%u]",
7990                                 match_value, i, match_mask, i);
7991                         return -EINVAL;
7992                 }
7993         }
7994         return 0;
7995 }
7996 #endif
7997
7998 /**
7999  * Add match of ip_version.
8000  *
8001  * @param[in] group
8002  *   Flow group.
8003  * @param[in] headers_v
8004  *   Values header pointer.
8005  * @param[in] headers_m
8006  *   Masks header pointer.
8007  * @param[in] ip_version
8008  *   The IP version to set.
8009  */
8010 static inline void
8011 flow_dv_set_match_ip_version(uint32_t group,
8012                              void *headers_v,
8013                              void *headers_m,
8014                              uint8_t ip_version)
8015 {
8016         if (group == 0)
8017                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8018         else
8019                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8020                          ip_version);
8021         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8022         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8023         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8024 }
8025
/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner,
			   uint32_t group)
{
	const struct rte_flow_item_eth *eth_m = item->mask;
	const struct rte_flow_item_eth *eth_v = item->spec;
	/* Default mask when the item carries none: match everything. */
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
		.has_vlan = 0,
	};
	void *hdrs_m;
	void *hdrs_v;
	char *l24_v;
	unsigned int i;

	/* Nothing to match without a spec. */
	if (!eth_v)
		return;
	if (!eth_m)
		eth_m = &nic_mask;
	if (inner) {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Destination MAC: copy mask, then AND spec with mask for the value. */
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
	       &eth_m->dst, sizeof(eth_m->dst));
	/* The value must be in the range of the mask. */
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
	for (i = 0; i < sizeof(eth_m->dst); ++i)
		l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
	/* Source MAC: same scheme (dst and src are both 6-byte addresses). */
	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
	       &eth_m->src, sizeof(eth_m->src));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
	/* The value must be in the range of the mask. */
	for (i = 0; i < sizeof(eth_m->dst); ++i)
		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
	/*
	 * HW supports match on one Ethertype, the Ethertype following the last
	 * VLAN tag of the packet (see PRM).
	 * Set match on ethertype only if ETH header is not followed by VLAN.
	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
	 * ethertype, and use ip_version field instead.
	 * eCPRI over Ether layer will use type value 0xAEFE.
	 */
	if (eth_m->type == 0xFFFF) {
		/* Set cvlan_tag mask for any single\multi\un-tagged case. */
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
		switch (eth_v->type) {
		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_QINQ):
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
			return;
		default:
			break;
		}
	}
	if (eth_m->has_vlan) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
		if (eth_v->has_vlan) {
			/*
			 * Here, when also has_more_vlan field in VLAN item is
			 * not set, only single-tagged packets will be matched.
			 */
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
			return;
		}
	}
	/* Generic ethertype match (mask in CPU order, value kept BE). */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
		 rte_be_to_cpu_16(eth_m->type));
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}
8126
/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] dev_flow
 *   Flow descriptor.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
			    void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	void *hdrs_m;
	void *hdrs_v;
	uint16_t tci_m;
	uint16_t tci_v;

	if (inner) {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
		/*
		 * This is workaround, masks are not supported,
		 * and pre-validated.
		 */
		/* Remember the outer VID for VF VLAN handling. */
		if (vlan_v)
			dev_flow->handle->vf_vlan.tag =
					rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
	}
	/*
	 * When VLAN item exists in flow, mark packet as tagged,
	 * even if TCI is not specified.
	 */
	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
	}
	if (!vlan_v)
		return;
	if (!vlan_m)
		vlan_m = &rte_flow_item_vlan_mask;
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	/* TCI layout: PRIO is bits 13-15, CFI bit 12, VID bits 0-11. */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
	/*
	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
	 * ethertype, and use ip_version field instead.
	 */
	if (vlan_m->inner_type == 0xFFFF) {
		switch (vlan_v->inner_type) {
		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
			return;
		default:
			break;
		}
	}
	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
		/* Only one vlan_tag bit can be set. */
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
		return;
	}
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
8223
/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	/* Default mask when the item carries none. */
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
			.time_to_live = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Match IP version even when no spec is given. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	/* Destination address (kept in big-endian, value ANDed with mask). */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	/* Source address. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* TOS byte is split: ECN in bits 0-1, DSCP in bits 2-7. */
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv4_m->hdr.time_to_live);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
	/* Fragment offset is reduced to a boolean "is fragment" match. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv4_m->hdr.fragment_offset));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
}
8306
/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Default mask when the item carries none. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Match IP version even when no spec is given. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	/* Addresses: copy the mask, AND spec with mask for the value. */
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	/* vtc_flow: ECN taken from bits 20-21, DSCP from bits 22-27. */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
	/* Hop limit. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv6_m->hdr.hop_limits);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
	/* Fragmentation is matched as a boolean via has_frag_ext. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv6_m->has_frag_ext));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
}
8415
8416 /**
8417  * Add IPV6 fragment extension item to matcher and to the value.
8418  *
8419  * @param[in, out] matcher
8420  *   Flow matcher.
8421  * @param[in, out] key
8422  *   Flow matcher value.
8423  * @param[in] item
8424  *   Flow pattern to translate.
8425  * @param[in] inner
8426  *   Item is inner pattern.
8427  */
8428 static void
8429 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8430                                      const struct rte_flow_item *item,
8431                                      int inner)
8432 {
8433         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8434         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8435         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8436                 .hdr = {
8437                         .next_header = 0xff,
8438                         .frag_data = RTE_BE16(0xffff),
8439                 },
8440         };
8441         void *headers_m;
8442         void *headers_v;
8443
8444         if (inner) {
8445                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8446                                          inner_headers);
8447                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8448         } else {
8449                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8450                                          outer_headers);
8451                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8452         }
8453         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8454         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8455         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8456         if (!ipv6_frag_ext_v)
8457                 return;
8458         if (!ipv6_frag_ext_m)
8459                 ipv6_frag_ext_m = &nic_mask;
8460         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8461                  ipv6_frag_ext_m->hdr.next_header);
8462         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8463                  ipv6_frag_ext_v->hdr.next_header &
8464                  ipv6_frag_ext_m->hdr.next_header);
8465 }
8466
8467 /**
8468  * Add TCP item to matcher and to the value.
8469  *
8470  * @param[in, out] matcher
8471  *   Flow matcher.
8472  * @param[in, out] key
8473  *   Flow matcher value.
8474  * @param[in] item
8475  *   Flow pattern to translate.
8476  * @param[in] inner
8477  *   Item is inner pattern.
8478  */
8479 static void
8480 flow_dv_translate_item_tcp(void *matcher, void *key,
8481                            const struct rte_flow_item *item,
8482                            int inner)
8483 {
8484         const struct rte_flow_item_tcp *tcp_m = item->mask;
8485         const struct rte_flow_item_tcp *tcp_v = item->spec;
8486         void *headers_m;
8487         void *headers_v;
8488
8489         if (inner) {
8490                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8491                                          inner_headers);
8492                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8493         } else {
8494                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8495                                          outer_headers);
8496                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8497         }
8498         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8499         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8500         if (!tcp_v)
8501                 return;
8502         if (!tcp_m)
8503                 tcp_m = &rte_flow_item_tcp_mask;
8504         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8505                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8506         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8507                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8508         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8509                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8510         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8511                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8512         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8513                  tcp_m->hdr.tcp_flags);
8514         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8515                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8516 }
8517
8518 /**
8519  * Add UDP item to matcher and to the value.
8520  *
8521  * @param[in, out] matcher
8522  *   Flow matcher.
8523  * @param[in, out] key
8524  *   Flow matcher value.
8525  * @param[in] item
8526  *   Flow pattern to translate.
8527  * @param[in] inner
8528  *   Item is inner pattern.
8529  */
8530 static void
8531 flow_dv_translate_item_udp(void *matcher, void *key,
8532                            const struct rte_flow_item *item,
8533                            int inner)
8534 {
8535         const struct rte_flow_item_udp *udp_m = item->mask;
8536         const struct rte_flow_item_udp *udp_v = item->spec;
8537         void *headers_m;
8538         void *headers_v;
8539
8540         if (inner) {
8541                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8542                                          inner_headers);
8543                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8544         } else {
8545                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8546                                          outer_headers);
8547                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8548         }
8549         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8550         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8551         if (!udp_v)
8552                 return;
8553         if (!udp_m)
8554                 udp_m = &rte_flow_item_udp_mask;
8555         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8556                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8557         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8558                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8559         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8560                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8561         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8562                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8563 }
8564
8565 /**
8566  * Add GRE optional Key item to matcher and to the value.
8567  *
8568  * @param[in, out] matcher
8569  *   Flow matcher.
8570  * @param[in, out] key
8571  *   Flow matcher value.
8572  * @param[in] item
8573  *   Flow pattern to translate.
8574  * @param[in] inner
8575  *   Item is inner pattern.
8576  */
8577 static void
8578 flow_dv_translate_item_gre_key(void *matcher, void *key,
8579                                    const struct rte_flow_item *item)
8580 {
8581         const rte_be32_t *key_m = item->mask;
8582         const rte_be32_t *key_v = item->spec;
8583         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8584         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8585         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8586
8587         /* GRE K bit must be on and should already be validated */
8588         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8589         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8590         if (!key_v)
8591                 return;
8592         if (!key_m)
8593                 key_m = &gre_key_default_mask;
8594         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8595                  rte_be_to_cpu_32(*key_m) >> 8);
8596         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8597                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8598         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8599                  rte_be_to_cpu_32(*key_m) & 0xFF);
8600         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8601                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8602 }
8603
/**
 * Add GRE item to matcher and to the value.
 *
 * Sets the IP protocol to GRE and matches on the GRE protocol field
 * plus the C/K/S presence bits extracted from the c_rsvd0_ver word.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Bit-field overlay for the host-order value of the GRE
	 * c_rsvd0_ver word, used to pick out the C/K/S presence bits.
	 * NOTE(review): field order assumes the compiler's bit-field
	 * layout matches this mapping on supported targets - the union
	 * must stay exactly as is.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

	/* Select inner or outer header area of the match parameters. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* The item always implies matching on IP protocol == GRE. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v)
		return;
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	/* GRE protocol (EtherType of the payload). */
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
	/* Decompose c_rsvd0_ver into individual presence bits. */
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
}
8679
8680 /**
8681  * Add NVGRE item to matcher and to the value.
8682  *
8683  * @param[in, out] matcher
8684  *   Flow matcher.
8685  * @param[in, out] key
8686  *   Flow matcher value.
8687  * @param[in] item
8688  *   Flow pattern to translate.
8689  * @param[in] inner
8690  *   Item is inner pattern.
8691  */
8692 static void
8693 flow_dv_translate_item_nvgre(void *matcher, void *key,
8694                              const struct rte_flow_item *item,
8695                              int inner)
8696 {
8697         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8698         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8699         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8700         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8701         const char *tni_flow_id_m;
8702         const char *tni_flow_id_v;
8703         char *gre_key_m;
8704         char *gre_key_v;
8705         int size;
8706         int i;
8707
8708         /* For NVGRE, GRE header fields must be set with defined values. */
8709         const struct rte_flow_item_gre gre_spec = {
8710                 .c_rsvd0_ver = RTE_BE16(0x2000),
8711                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8712         };
8713         const struct rte_flow_item_gre gre_mask = {
8714                 .c_rsvd0_ver = RTE_BE16(0xB000),
8715                 .protocol = RTE_BE16(UINT16_MAX),
8716         };
8717         const struct rte_flow_item gre_item = {
8718                 .spec = &gre_spec,
8719                 .mask = &gre_mask,
8720                 .last = NULL,
8721         };
8722         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8723         if (!nvgre_v)
8724                 return;
8725         if (!nvgre_m)
8726                 nvgre_m = &rte_flow_item_nvgre_mask;
8727         tni_flow_id_m = (const char *)nvgre_m->tni;
8728         tni_flow_id_v = (const char *)nvgre_v->tni;
8729         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8730         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8731         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8732         memcpy(gre_key_m, tni_flow_id_m, size);
8733         for (i = 0; i < size; ++i)
8734                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8735 }
8736
/**
 * Add VXLAN item to matcher and to the value.
 *
 * Depending on device capabilities and the rule's group, the VNI is
 * matched either via the legacy misc.vxlan_vni field or via the
 * misc5.tunnel_header_1 field (which also allows matching rsvd1).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc5_m;
	void *misc5_v;
	uint32_t *tunnel_header_v;
	uint32_t *tunnel_header_m;
	uint16_t dport;
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Wider default mask (VNI + rsvd1) used on the misc5 path. */
	const struct rte_flow_item_vxlan nic_mask = {
		.vni = "\xff\xff\xff",
		.rsvd1 = 0xff,
	};

	/* Select inner or outer header area of the match parameters. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	/* Set the UDP destination port unless a UDP item already did. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!vxlan_v)
		return;
	if (!vxlan_m) {
		/* Pick the default mask matching the translation path. */
		if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
		    (attr->group && !priv->sh->misc5_cap))
			vxlan_m = &rte_flow_item_vxlan_mask;
		else
			vxlan_m = &nic_mask;
	}
	/*
	 * Legacy path: no misc5/tunnel_header support for this group,
	 * match the VNI through the misc.vxlan_vni byte array.
	 */
	if ((!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
	    ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
		void *misc_m;
		void *misc_v;
		char *vni_m;
		char *vni_v;
		int size;
		int i;
		misc_m = MLX5_ADDR_OF(fte_match_param,
				      matcher, misc_parameters);
		misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
		size = sizeof(vxlan_m->vni);
		vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
		vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
		memcpy(vni_m, vxlan_m->vni, size);
		for (i = 0; i < size; ++i)
			vni_v[i] = vni_m[i] & vxlan_v->vni[i];
		return;
	}
	/*
	 * misc5 path: pack VNI (and rsvd1) into the 32-bit
	 * tunnel_header_1 field, written directly byte by byte.
	 */
	misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
	misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
	tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
						   misc5_v,
						   tunnel_header_1);
	tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
						   misc5_m,
						   tunnel_header_1);
	*tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
			   (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
			   (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
	/* Only install the VNI mask when the masked value is non-zero. */
	if (*tunnel_header_v)
		*tunnel_header_m = vxlan_m->vni[0] |
			vxlan_m->vni[1] << 8 |
			vxlan_m->vni[2] << 16;
	else
		*tunnel_header_m = 0x0;
	/* rsvd1 occupies the top byte of tunnel_header_1. */
	*tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
	if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
		*tunnel_header_m |= vxlan_m->rsvd1 << 24;
}
8839
8840 /**
8841  * Add VXLAN-GPE item to matcher and to the value.
8842  *
8843  * @param[in, out] matcher
8844  *   Flow matcher.
8845  * @param[in, out] key
8846  *   Flow matcher value.
8847  * @param[in] item
8848  *   Flow pattern to translate.
8849  * @param[in] inner
8850  *   Item is inner pattern.
8851  */
8852
8853 static void
8854 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8855                                  const struct rte_flow_item *item, int inner)
8856 {
8857         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8858         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8859         void *headers_m;
8860         void *headers_v;
8861         void *misc_m =
8862                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8863         void *misc_v =
8864                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8865         char *vni_m;
8866         char *vni_v;
8867         uint16_t dport;
8868         int size;
8869         int i;
8870         uint8_t flags_m = 0xff;
8871         uint8_t flags_v = 0xc;
8872
8873         if (inner) {
8874                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8875                                          inner_headers);
8876                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8877         } else {
8878                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8879                                          outer_headers);
8880                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8881         }
8882         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8883                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8884         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8885                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8886                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8887         }
8888         if (!vxlan_v)
8889                 return;
8890         if (!vxlan_m)
8891                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8892         size = sizeof(vxlan_m->vni);
8893         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8894         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8895         memcpy(vni_m, vxlan_m->vni, size);
8896         for (i = 0; i < size; ++i)
8897                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8898         if (vxlan_m->flags) {
8899                 flags_m = vxlan_m->flags;
8900                 flags_v = vxlan_v->flags;
8901         }
8902         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8903         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8904         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8905                  vxlan_m->protocol);
8906         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8907                  vxlan_v->protocol);
8908 }
8909
8910 /**
8911  * Add Geneve item to matcher and to the value.
8912  *
8913  * @param[in, out] matcher
8914  *   Flow matcher.
8915  * @param[in, out] key
8916  *   Flow matcher value.
8917  * @param[in] item
8918  *   Flow pattern to translate.
8919  * @param[in] inner
8920  *   Item is inner pattern.
8921  */
8922
8923 static void
8924 flow_dv_translate_item_geneve(void *matcher, void *key,
8925                               const struct rte_flow_item *item, int inner)
8926 {
8927         const struct rte_flow_item_geneve *geneve_m = item->mask;
8928         const struct rte_flow_item_geneve *geneve_v = item->spec;
8929         void *headers_m;
8930         void *headers_v;
8931         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8932         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8933         uint16_t dport;
8934         uint16_t gbhdr_m;
8935         uint16_t gbhdr_v;
8936         char *vni_m;
8937         char *vni_v;
8938         size_t size, i;
8939
8940         if (inner) {
8941                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8942                                          inner_headers);
8943                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8944         } else {
8945                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8946                                          outer_headers);
8947                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8948         }
8949         dport = MLX5_UDP_PORT_GENEVE;
8950         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8951                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8952                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8953         }
8954         if (!geneve_v)
8955                 return;
8956         if (!geneve_m)
8957                 geneve_m = &rte_flow_item_geneve_mask;
8958         size = sizeof(geneve_m->vni);
8959         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8960         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8961         memcpy(vni_m, geneve_m->vni, size);
8962         for (i = 0; i < size; ++i)
8963                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8964         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8965                  rte_be_to_cpu_16(geneve_m->protocol));
8966         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8967                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8968         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8969         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8970         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8971                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8972         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8973                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8974         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8975                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8976         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8977                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8978                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8979 }
8980
/**
 * Create a GENEVE TLV option resource, or take a reference on the
 * existing one.
 *
 * Only one GENEVE TLV option object is supported per shared device
 * context; registration of a second, different option
 * (class/type/length mismatch) fails.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] item
 *   GENEVE option flow item providing class, type and length.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, -1 when the item has no spec, otherwise a negative
 *   value set by rte_flow_error_set().
 */

int
flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
					     const struct rte_flow_item *item,
					     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
			sh->geneve_tlv_option_resource;
	struct mlx5_devx_obj *obj;
	const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
	int ret = 0;

	if (!geneve_opt_v)
		return -1;
	/* Serialize creation/reuse against concurrent registrations. */
	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
	if (geneve_opt_resource != NULL) {
		if (geneve_opt_resource->option_class ==
			geneve_opt_v->option_class &&
			geneve_opt_resource->option_type ==
			geneve_opt_v->option_type &&
			geneve_opt_resource->length ==
			geneve_opt_v->option_len) {
			/* We already have GENEVE TLV option obj allocated. */
			__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
					   __ATOMIC_RELAXED);
		} else {
			ret = rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Only one GENEVE TLV option supported");
			goto exit;
		}
	} else {
		/* Create a GENEVE TLV object and resource. */
		obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
				geneve_opt_v->option_class,
				geneve_opt_v->option_type,
				geneve_opt_v->option_len);
		if (!obj) {
			ret = rte_flow_error_set(error, ENODATA,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to create GENEVE TLV Devx object");
			goto exit;
		}
		sh->geneve_tlv_option_resource =
				mlx5_malloc(MLX5_MEM_ZERO,
						sizeof(*geneve_opt_resource),
						0, SOCKET_ID_ANY);
		if (!sh->geneve_tlv_option_resource) {
			/* Roll back the DevX object on allocation failure. */
			claim_zero(mlx5_devx_cmd_destroy(obj));
			ret = rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"GENEVE TLV object memory allocation failed");
			goto exit;
		}
		geneve_opt_resource = sh->geneve_tlv_option_resource;
		geneve_opt_resource->obj = obj;
		geneve_opt_resource->option_class = geneve_opt_v->option_class;
		geneve_opt_resource->option_type = geneve_opt_v->option_type;
		geneve_opt_resource->length = geneve_opt_v->option_len;
		/* First reference; later users only bump the refcount. */
		__atomic_store_n(&geneve_opt_resource->refcnt, 1,
				__ATOMIC_RELAXED);
	}
exit:
	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
	return ret;
}
9064
9065 /**
9066  * Add Geneve TLV option item to matcher.
9067  *
9068  * @param[in, out] dev
9069  *   Pointer to rte_eth_dev structure.
9070  * @param[in, out] matcher
9071  *   Flow matcher.
9072  * @param[in, out] key
9073  *   Flow matcher value.
9074  * @param[in] item
9075  *   Flow pattern to translate.
9076  * @param[out] error
9077  *   Pointer to error structure.
9078  */
9079 static int
9080 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9081                                   void *key, const struct rte_flow_item *item,
9082                                   struct rte_flow_error *error)
9083 {
9084         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9085         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9086         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9087         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9088         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9089                         misc_parameters_3);
9090         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9091         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9092         int ret = 0;
9093
9094         if (!geneve_opt_v)
9095                 return -1;
9096         if (!geneve_opt_m)
9097                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9098         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9099                                                            error);
9100         if (ret) {
9101                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9102                 return ret;
9103         }
9104         /*
9105          * Set the option length in GENEVE header if not requested.
9106          * The GENEVE TLV option length is expressed by the option length field
9107          * in the GENEVE header.
9108          * If the option length was not requested but the GENEVE TLV option item
9109          * is present we set the option length field implicitly.
9110          */
9111         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9112                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9113                          MLX5_GENEVE_OPTLEN_MASK);
9114                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9115                          geneve_opt_v->option_len + 1);
9116         }
9117         /* Set the data. */
9118         if (geneve_opt_v->data) {
9119                 memcpy(&opt_data_key, geneve_opt_v->data,
9120                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9121                                 sizeof(opt_data_key)));
9122                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9123                                 sizeof(opt_data_key));
9124                 memcpy(&opt_data_mask, geneve_opt_m->data,
9125                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9126                                 sizeof(opt_data_mask)));
9127                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9128                                 sizeof(opt_data_mask));
9129                 MLX5_SET(fte_match_set_misc3, misc3_m,
9130                                 geneve_tlv_option_0_data,
9131                                 rte_be_to_cpu_32(opt_data_mask));
9132                 MLX5_SET(fte_match_set_misc3, misc3_v,
9133                                 geneve_tlv_option_0_data,
9134                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9135         }
9136         return ret;
9137 }
9138
9139 /**
9140  * Add MPLS item to matcher and to the value.
9141  *
9142  * @param[in, out] matcher
9143  *   Flow matcher.
9144  * @param[in, out] key
9145  *   Flow matcher value.
9146  * @param[in] item
9147  *   Flow pattern to translate.
9148  * @param[in] prev_layer
9149  *   The protocol layer indicated in previous item.
9150  * @param[in] inner
9151  *   Item is inner pattern.
9152  */
9153 static void
9154 flow_dv_translate_item_mpls(void *matcher, void *key,
9155                             const struct rte_flow_item *item,
9156                             uint64_t prev_layer,
9157                             int inner)
9158 {
9159         const uint32_t *in_mpls_m = item->mask;
9160         const uint32_t *in_mpls_v = item->spec;
9161         uint32_t *out_mpls_m = 0;
9162         uint32_t *out_mpls_v = 0;
9163         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9164         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9165         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9166                                      misc_parameters_2);
9167         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9168         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9169         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9170
9171         switch (prev_layer) {
9172         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9173                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9174                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9175                          MLX5_UDP_PORT_MPLS);
9176                 break;
9177         case MLX5_FLOW_LAYER_GRE:
9178                 /* Fall-through. */
9179         case MLX5_FLOW_LAYER_GRE_KEY:
9180                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9181                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9182                          RTE_ETHER_TYPE_MPLS);
9183                 break;
9184         default:
9185                 break;
9186         }
9187         if (!in_mpls_v)
9188                 return;
9189         if (!in_mpls_m)
9190                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9191         switch (prev_layer) {
9192         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9193                 out_mpls_m =
9194                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9195                                                  outer_first_mpls_over_udp);
9196                 out_mpls_v =
9197                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9198                                                  outer_first_mpls_over_udp);
9199                 break;
9200         case MLX5_FLOW_LAYER_GRE:
9201                 out_mpls_m =
9202                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9203                                                  outer_first_mpls_over_gre);
9204                 out_mpls_v =
9205                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9206                                                  outer_first_mpls_over_gre);
9207                 break;
9208         default:
9209                 /* Inner MPLS not over GRE is not supported. */
9210                 if (!inner) {
9211                         out_mpls_m =
9212                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9213                                                          misc2_m,
9214                                                          outer_first_mpls);
9215                         out_mpls_v =
9216                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9217                                                          misc2_v,
9218                                                          outer_first_mpls);
9219                 }
9220                 break;
9221         }
9222         if (out_mpls_m && out_mpls_v) {
9223                 *out_mpls_m = *in_mpls_m;
9224                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9225         }
9226 }
9227
9228 /**
9229  * Add metadata register item to matcher
9230  *
9231  * @param[in, out] matcher
9232  *   Flow matcher.
9233  * @param[in, out] key
9234  *   Flow matcher value.
9235  * @param[in] reg_type
9236  *   Type of device metadata register
9237  * @param[in] value
9238  *   Register value
9239  * @param[in] mask
9240  *   Register mask
9241  */
static void
flow_dv_match_meta_reg(void *matcher, void *key,
		       enum modify_reg reg_type,
		       uint32_t data, uint32_t mask)
{
	void *misc2_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
	void *misc2_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	uint32_t temp;

	/* Value bits outside the mask must never be programmed. */
	data &= mask;
	switch (reg_type) {
	case REG_A:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
		break;
	case REG_B:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
		break;
	case REG_C_0:
		/*
		 * The metadata register C0 field might be divided into
		 * source vport index and META item value, we should set
		 * this field according to specified mask, not as whole one.
		 * Read-modify-write so bits owned by the other user of C0
		 * are preserved.
		 */
		temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
		temp |= mask;
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
		temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
		temp &= ~mask;
		temp |= data;
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
		break;
	case REG_C_1:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
		break;
	case REG_C_2:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
		break;
	case REG_C_3:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
		break;
	case REG_C_4:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
		break;
	case REG_C_5:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
		break;
	case REG_C_6:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
		break;
	case REG_C_7:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
		break;
	default:
		/* Unknown register must have been rejected at validation. */
		MLX5_ASSERT(false);
		break;
	}
}
9310
9311 /**
9312  * Add MARK item to matcher
9313  *
9314  * @param[in] dev
9315  *   The device to configure through.
9316  * @param[in, out] matcher
9317  *   Flow matcher.
9318  * @param[in, out] key
9319  *   Flow matcher value.
9320  * @param[in] item
9321  *   Flow pattern to translate.
9322  */
9323 static void
9324 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9325                             void *matcher, void *key,
9326                             const struct rte_flow_item *item)
9327 {
9328         struct mlx5_priv *priv = dev->data->dev_private;
9329         const struct rte_flow_item_mark *mark;
9330         uint32_t value;
9331         uint32_t mask;
9332
9333         mark = item->mask ? (const void *)item->mask :
9334                             &rte_flow_item_mark_mask;
9335         mask = mark->id & priv->sh->dv_mark_mask;
9336         mark = (const void *)item->spec;
9337         MLX5_ASSERT(mark);
9338         value = mark->id & priv->sh->dv_mark_mask & mask;
9339         if (mask) {
9340                 enum modify_reg reg;
9341
9342                 /* Get the metadata register index for the mark. */
9343                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9344                 MLX5_ASSERT(reg > 0);
9345                 if (reg == REG_C_0) {
9346                         struct mlx5_priv *priv = dev->data->dev_private;
9347                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9348                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9349
9350                         mask &= msk_c0;
9351                         mask <<= shl_c0;
9352                         value <<= shl_c0;
9353                 }
9354                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9355         }
9356 }
9357
9358 /**
9359  * Add META item to matcher
9360  *
9361  * @param[in] dev
 *   The device to configure through.
9363  * @param[in, out] matcher
9364  *   Flow matcher.
9365  * @param[in, out] key
9366  *   Flow matcher value.
9367  * @param[in] attr
9368  *   Attributes of flow that includes this item.
9369  * @param[in] item
9370  *   Flow pattern to translate.
9371  */
9372 static void
9373 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9374                             void *matcher, void *key,
9375                             const struct rte_flow_attr *attr,
9376                             const struct rte_flow_item *item)
9377 {
9378         const struct rte_flow_item_meta *meta_m;
9379         const struct rte_flow_item_meta *meta_v;
9380
9381         meta_m = (const void *)item->mask;
9382         if (!meta_m)
9383                 meta_m = &rte_flow_item_meta_mask;
9384         meta_v = (const void *)item->spec;
9385         if (meta_v) {
9386                 int reg;
9387                 uint32_t value = meta_v->data;
9388                 uint32_t mask = meta_m->data;
9389
9390                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9391                 if (reg < 0)
9392                         return;
9393                 MLX5_ASSERT(reg != REG_NON);
9394                 if (reg == REG_C_0) {
9395                         struct mlx5_priv *priv = dev->data->dev_private;
9396                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9397                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9398
9399                         mask &= msk_c0;
9400                         mask <<= shl_c0;
9401                         value <<= shl_c0;
9402                 }
9403                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9404         }
9405 }
9406
9407 /**
9408  * Add vport metadata Reg C0 item to matcher
9409  *
9410  * @param[in, out] matcher
9411  *   Flow matcher.
9412  * @param[in, out] key
9413  *   Flow matcher value.
9414  * @param[in] reg
9415  *   Flow pattern to translate.
9416  */
static void
flow_dv_translate_item_meta_vport(void *matcher, void *key,
				  uint32_t value, uint32_t mask)
{
	/* Vport metadata is always carried in metadata register C0. */
	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
}
9423
9424 /**
9425  * Add tag item to matcher
9426  *
9427  * @param[in] dev
 *   The device to configure through.
9429  * @param[in, out] matcher
9430  *   Flow matcher.
9431  * @param[in, out] key
9432  *   Flow matcher value.
9433  * @param[in] item
9434  *   Flow pattern to translate.
9435  */
9436 static void
9437 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9438                                 void *matcher, void *key,
9439                                 const struct rte_flow_item *item)
9440 {
9441         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9442         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9443         uint32_t mask, value;
9444
9445         MLX5_ASSERT(tag_v);
9446         value = tag_v->data;
9447         mask = tag_m ? tag_m->data : UINT32_MAX;
9448         if (tag_v->id == REG_C_0) {
9449                 struct mlx5_priv *priv = dev->data->dev_private;
9450                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9451                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9452
9453                 mask &= msk_c0;
9454                 mask <<= shl_c0;
9455                 value <<= shl_c0;
9456         }
9457         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9458 }
9459
9460 /**
9461  * Add TAG item to matcher
9462  *
9463  * @param[in] dev
 *   The device to configure through.
9465  * @param[in, out] matcher
9466  *   Flow matcher.
9467  * @param[in, out] key
9468  *   Flow matcher value.
9469  * @param[in] item
9470  *   Flow pattern to translate.
9471  */
9472 static void
9473 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9474                            void *matcher, void *key,
9475                            const struct rte_flow_item *item)
9476 {
9477         const struct rte_flow_item_tag *tag_v = item->spec;
9478         const struct rte_flow_item_tag *tag_m = item->mask;
9479         enum modify_reg reg;
9480
9481         MLX5_ASSERT(tag_v);
9482         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9483         /* Get the metadata register index for the tag. */
9484         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9485         MLX5_ASSERT(reg > 0);
9486         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9487 }
9488
9489 /**
9490  * Add source vport match to the specified matcher.
9491  *
9492  * @param[in, out] matcher
9493  *   Flow matcher.
9494  * @param[in, out] key
9495  *   Flow matcher value.
9496  * @param[in] port
9497  *   Source vport value to match
9498  * @param[in] mask
9499  *   Mask
9500  */
9501 static void
9502 flow_dv_translate_item_source_vport(void *matcher, void *key,
9503                                     int16_t port, uint16_t mask)
9504 {
9505         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9506         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9507
9508         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9509         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9510 }
9511
/**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] attr
 *   Flow attributes.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
			       void *key, const struct rte_flow_item *item,
			       const struct rte_flow_attr *attr)
{
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	struct mlx5_priv *priv;
	uint16_t mask, id;

	/* Without an item, match this device's own port with a full mask. */
	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	if (!priv)
		return -rte_errno;
	/*
	 * Translate to vport field or to metadata, depending on mode.
	 * Kernel can use either misc.source_port or half of C0 metadata
	 * register.
	 */
	if (priv->vport_meta_mask) {
		/*
		 * Provide the hint for SW steering library
		 * to insert the flow into ingress domain and
		 * save the extra vport match.
		 */
		if (mask == 0xffff && priv->vport_id == 0xffff &&
		    priv->pf_bond < 0 && attr->transfer)
			flow_dv_translate_item_source_vport
				(matcher, key, priv->vport_id, mask);
		/*
		 * We should always set the vport metadata register,
		 * otherwise the SW steering library can drop
		 * the rule if wire vport metadata value is not zero,
		 * it depends on kernel configuration.
		 */
		flow_dv_translate_item_meta_vport(matcher, key,
						  priv->vport_meta_tag,
						  priv->vport_meta_mask);
	} else {
		/* No vport metadata available: fall back to source vport. */
		flow_dv_translate_item_source_vport(matcher, key,
						    priv->vport_id, mask);
	}
	return 0;
}
9574
9575 /**
9576  * Add ICMP6 item to matcher and to the value.
9577  *
9578  * @param[in, out] matcher
9579  *   Flow matcher.
9580  * @param[in, out] key
9581  *   Flow matcher value.
9582  * @param[in] item
9583  *   Flow pattern to translate.
9584  * @param[in] inner
9585  *   Item is inner pattern.
9586  */
9587 static void
9588 flow_dv_translate_item_icmp6(void *matcher, void *key,
9589                               const struct rte_flow_item *item,
9590                               int inner)
9591 {
9592         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9593         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9594         void *headers_m;
9595         void *headers_v;
9596         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9597                                      misc_parameters_3);
9598         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9599         if (inner) {
9600                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9601                                          inner_headers);
9602                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9603         } else {
9604                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9605                                          outer_headers);
9606                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9607         }
9608         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9609         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9610         if (!icmp6_v)
9611                 return;
9612         if (!icmp6_m)
9613                 icmp6_m = &rte_flow_item_icmp6_mask;
9614         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9615         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9616                  icmp6_v->type & icmp6_m->type);
9617         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9618         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9619                  icmp6_v->code & icmp6_m->code);
9620 }
9621
9622 /**
9623  * Add ICMP item to matcher and to the value.
9624  *
9625  * @param[in, out] matcher
9626  *   Flow matcher.
9627  * @param[in, out] key
9628  *   Flow matcher value.
9629  * @param[in] item
9630  *   Flow pattern to translate.
9631  * @param[in] inner
9632  *   Item is inner pattern.
9633  */
static void
flow_dv_translate_item_icmp(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_icmp *icmp_m = item->mask;
	const struct rte_flow_item_icmp *icmp_v = item->spec;
	uint32_t icmp_header_data_m = 0;
	uint32_t icmp_header_data_v = 0;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* The ICMP item implies the IP protocol number. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
	if (!icmp_v)
		return;
	if (!icmp_m)
		icmp_m = &rte_flow_item_icmp_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
		 icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
		 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
		 icmp_m->hdr.icmp_code);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
	/*
	 * Fold ident (high 16 bits) and sequence number (low 16 bits) into
	 * the single 32-bit icmp_header_data field; the item fields arrive
	 * big-endian and are converted first.
	 */
	icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
	icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
	if (icmp_header_data_m) {
		icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
		icmp_header_data_v |=
			 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
		MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
			 icmp_header_data_m);
		MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
			 icmp_header_data_v & icmp_header_data_m);
	}
}
9683
9684 /**
9685  * Add GTP item to matcher and to the value.
9686  *
9687  * @param[in, out] matcher
9688  *   Flow matcher.
9689  * @param[in, out] key
9690  *   Flow matcher value.
9691  * @param[in] item
9692  *   Flow pattern to translate.
9693  * @param[in] inner
9694  *   Item is inner pattern.
9695  */
static void
flow_dv_translate_item_gtp(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_gtp *gtp_m = item->mask;
	const struct rte_flow_item_gtp *gtp_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	uint16_t dport = RTE_GTPU_UDP_PORT;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Pin the UDP destination port to GTP-U unless already matched. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!gtp_v)
		return;
	if (!gtp_m)
		gtp_m = &rte_flow_item_gtp_mask;
	/* Flags, message type and TEID all live in misc parameters set 3. */
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
		 gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
		 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
		 gtp_v->msg_type & gtp_m->msg_type);
	/* TEID arrives big-endian in the item; convert before programming. */
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
		 rte_be_to_cpu_32(gtp_m->teid));
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
		 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
}
9738
9739 /**
9740  * Add GTP PSC item to matcher.
9741  *
9742  * @param[in, out] matcher
9743  *   Flow matcher.
9744  * @param[in, out] key
9745  *   Flow matcher value.
9746  * @param[in] item
9747  *   Flow pattern to translate.
9748  */
static int
flow_dv_translate_item_gtp_psc(void *matcher, void *key,
			       const struct rte_flow_item *item)
{
	const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
			misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	/*
	 * Overlay of GTP-U dword 2: sequence number, N-PDU number and next
	 * extension header type.
	 * NOTE(review): the struct-over-uint32_t layout combined with the
	 * explicit rte_cpu_to_be_32() assumes a little-endian host -
	 * confirm behavior on big-endian targets.
	 */
	union {
		uint32_t w32;
		struct {
			uint16_t seq_num;
			uint8_t npdu_num;
			uint8_t next_ext_header_type;
		};
	} dw_2;
	uint8_t gtp_flags;

	/* Always set E-flag match on one, regardless of GTP item settings. */
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
	/* Set next extension header type: 0x85 is PDU session container. */
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	dw_2.next_ext_header_type = 0xff;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	dw_2.next_ext_header_type = 0x85;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	if (gtp_psc_v) {
		/* Overlay of the first extension header dword. */
		union {
			uint32_t w32;
			struct {
				uint8_t len;
				uint8_t type_flags;
				uint8_t qfi;
				uint8_t reserved;
			};
		} dw_0;

		/* Set extension header PDU type and QoS flow identifier. */
		if (!gtp_psc_m)
			gtp_psc_m = &rte_flow_item_gtp_psc_mask;
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
		dw_0.qfi = gtp_psc_m->qfi;
		MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
							gtp_psc_m->pdu_type);
		dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
		MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
	}
	/* Always succeeds; int return kept for translate-callback symmetry. */
	return 0;
}
9814
9815 /**
9816  * Add eCPRI item to matcher and to the value.
9817  *
9818  * @param[in] dev
 *   The device to configure through.
9820  * @param[in, out] matcher
9821  *   Flow matcher.
9822  * @param[in, out] key
9823  *   Flow matcher value.
9824  * @param[in] item
9825  *   Flow pattern to translate.
9826  * @param[in] samples
9827  *   Sample IDs to be used in the matching.
9828  */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
			     void *key, const struct rte_flow_item *item)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
	struct rte_ecpri_common_hdr common;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	uint32_t *samples;
	void *dw_m;
	void *dw_v;

	if (!ecpri_v)
		return;
	if (!ecpri_m)
		ecpri_m = &rte_flow_item_ecpri_mask;
	/*
	 * Maximal four DW samples are supported in a single matching now.
	 * Two are used now for a eCPRI matching:
	 * 1. Type: one byte, mask should be 0x00ff0000 in network order
	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
	 *    if any.
	 */
	if (!ecpri_m->hdr.common.u32)
		return;
	/* Flex parser sample IDs for eCPRI, resolved at device start. */
	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
	/* Need to take the whole DW as the mask to fill the entry. */
	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
			    prog_sample_field_value_0);
	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
			    prog_sample_field_value_0);
	/* Already big endian (network order) in the header. */
	*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
	*(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
	/* Sample#0, used for matching type, offset 0. */
	MLX5_SET(fte_match_set_misc4, misc4_m,
		 prog_sample_field_id_0, samples[0]);
	/* It makes no sense to set the sample ID in the mask field. */
	MLX5_SET(fte_match_set_misc4, misc4_v,
		 prog_sample_field_id_0, samples[0]);
	/*
	 * Checking if message body part needs to be matched.
	 * Some wildcard rules only matching type field should be supported.
	 */
	if (ecpri_m->hdr.dummy[0]) {
		common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
		switch (common.type) {
		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
			dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
					    prog_sample_field_value_1);
			dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
					    prog_sample_field_value_1);
			*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
			*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
					    ecpri_m->hdr.dummy[0];
			/* Sample#1, to match message body, offset 4. */
			MLX5_SET(fte_match_set_misc4, misc4_m,
				 prog_sample_field_id_1, samples[1]);
			MLX5_SET(fte_match_set_misc4, misc4_v,
				 prog_sample_field_id_1, samples[1]);
			break;
		default:
			/* Others, do not match any sample ID. */
			break;
		}
	}
}
9901
/**
 * Add connection tracking status item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
			      void *matcher, void *key,
			      const struct rte_flow_item *item)
{
	uint32_t reg_value = 0;
	int reg_id;
	/* 8LSB 0b 11/0000/11, middle 4 bits are reserved. */
	uint32_t reg_mask = 0;
	const struct rte_flow_item_conntrack *spec = item->spec;
	const struct rte_flow_item_conntrack *mask = item->mask;
	uint32_t flags;
	struct rte_flow_error error;

	if (!mask)
		mask = &rte_flow_item_conntrack_mask;
	/* Nothing to match on without a spec or with an empty mask. */
	if (!spec || !mask->flags)
		return;
	flags = spec->flags & mask->flags;
	/* The conflict should be checked in the validation. */
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
		reg_value |= MLX5_CT_SYNDROME_VALID;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
		reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
		reg_value |= MLX5_CT_SYNDROME_INVALID;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
		reg_value |= MLX5_CT_SYNDROME_TRAP;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
		reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
	/* VALID/INVALID/DISABLED share the two top syndrome bits (0xc0). */
	if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
			   RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
			   RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
		reg_mask |= 0xc0;
	if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
		reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
	if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
		reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
	/* The REG_C_x value could be saved during startup. */
	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
	/*
	 * NOTE(review): only REG_NON is rejected here; a negative error
	 * return from mlx5_flow_get_reg_id would slip through the cast
	 * below - confirm the helper cannot fail for MLX5_ASO_CONNTRACK.
	 */
	if (reg_id == REG_NON)
		return;
	flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
			       reg_value, reg_mask);
}
9959
9960 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9961
9962 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9963         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9964                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9965
9966 /**
9967  * Calculate flow matcher enable bitmap.
9968  *
9969  * @param match_criteria
9970  *   Pointer to flow matcher criteria.
9971  *
9972  * @return
9973  *   Bitmap of enabled fields.
9974  */
9975 static uint8_t
9976 flow_dv_matcher_enable(uint32_t *match_criteria)
9977 {
9978         uint8_t match_criteria_enable;
9979
9980         match_criteria_enable =
9981                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9982                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9983         match_criteria_enable |=
9984                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9985                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9986         match_criteria_enable |=
9987                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9988                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9989         match_criteria_enable |=
9990                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9991                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9992         match_criteria_enable |=
9993                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9994                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9995         match_criteria_enable |=
9996                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9997                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9998         match_criteria_enable |=
9999                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10000                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10001         return match_criteria_enable;
10002 }
10003
10004 static void
10005 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10006 {
10007         /*
10008          * Check flow matching criteria first, subtract misc5/4 length if flow
10009          * doesn't own misc5/4 parameters. In some old rdma-core releases,
10010          * misc5/4 are not supported, and matcher creation failure is expected
10011          * w/o subtration. If misc5 is provided, misc4 must be counted in since
10012          * misc5 is right after misc4.
10013          */
10014         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10015                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10016                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10017                 if (!(match_criteria & (1 <<
10018                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10019                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10020                 }
10021         }
10022 }
10023
10024 static struct mlx5_list_entry *
10025 flow_dv_matcher_clone_cb(struct mlx5_list *list __rte_unused,
10026                          struct mlx5_list_entry *entry, void *cb_ctx)
10027 {
10028         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10029         struct mlx5_flow_dv_matcher *ref = ctx->data;
10030         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10031                                                             typeof(*tbl), tbl);
10032         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10033                                                             sizeof(*resource),
10034                                                             0, SOCKET_ID_ANY);
10035
10036         if (!resource) {
10037                 rte_flow_error_set(ctx->error, ENOMEM,
10038                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10039                                    "cannot create matcher");
10040                 return NULL;
10041         }
10042         memcpy(resource, entry, sizeof(*resource));
10043         resource->tbl = &tbl->tbl;
10044         return &resource->entry;
10045 }
10046
/* List callback: release a matcher clone allocated by flow_dv_matcher_clone_cb(). */
static void
flow_dv_matcher_clone_free_cb(struct mlx5_list *list __rte_unused,
			     struct mlx5_list_entry *entry)
{
	mlx5_free(entry);
}
10053
/**
 * Hash list callback: allocate and initialize a flow table entry.
 *
 * Allocates the table data entry from the JUMP indexed pool, decodes the
 * 64-bit hash key into level/id/direction attributes, creates the
 * underlying flow table object (unless the entry is a dummy one used by
 * the DV API), a jump action for non-root tables, and the per-table
 * matcher list.
 *
 * @param list
 *   Hash list the entry is created for; its ctx is the shared dev context.
 * @param key64
 *   Encoded union mlx5_flow_tbl_key value.
 * @param cb_ctx
 *   Pointer to struct mlx5_flow_cb_ctx carrying dev, error and tunnel
 *   parameters (mlx5_flow_tbl_tunnel_prm in ctx->data).
 *
 * @return
 *   Pointer to the created hash list entry, NULL on failure
 *   (error is filled and the pool index is released).
 */
struct mlx5_hlist_entry *
flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
	struct rte_flow_error *error = ctx->error;
	union mlx5_flow_tbl_key key = { .v64 = key64 };
	struct mlx5_flow_tbl_resource *tbl;
	void *domain;
	uint32_t idx = 0;
	int ret;

	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	tbl_data->idx = idx;
	tbl_data->tunnel = tt_prm->tunnel;
	tbl_data->group_id = tt_prm->group_id;
	tbl_data->external = !!tt_prm->external;
	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
	tbl_data->is_egress = !!key.is_egress;
	tbl_data->is_transfer = !!key.is_fdb;
	tbl_data->dummy = !!key.dummy;
	tbl_data->level = key.level;
	tbl_data->id = key.id;
	tbl = &tbl_data->tbl;
	/* Dummy tables carry only metadata; no HW objects are created. */
	if (key.dummy)
		return &tbl_data->entry;
	/* Select the steering domain from the table direction. */
	if (key.is_fdb)
		domain = sh->fdb_domain;
	else if (key.is_egress)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	/* The root table (level 0) cannot be a jump target, skip the action. */
	if (key.level != 0) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(tbl->obj, &tbl_data->jump.action);
		if (ret) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot create flow jump action");
			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
			return NULL;
		}
	}
	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
	      key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
	      key.level, key.id);
	tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
					      flow_dv_matcher_create_cb,
					      flow_dv_matcher_match_cb,
					      flow_dv_matcher_remove_cb,
					      flow_dv_matcher_clone_cb,
					      flow_dv_matcher_clone_free_cb);
	if (!tbl_data->matchers) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create tbl matcher list");
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
		mlx5_flow_os_destroy_flow_tbl(tbl->obj);
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	return &tbl_data->entry;
}
10138
10139 int
10140 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
10141                      struct mlx5_hlist_entry *entry, uint64_t key64,
10142                      void *cb_ctx __rte_unused)
10143 {
10144         struct mlx5_flow_tbl_data_entry *tbl_data =
10145                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10146         union mlx5_flow_tbl_key key = { .v64 = key64 };
10147
10148         return tbl_data->level != key.level ||
10149                tbl_data->id != key.id ||
10150                tbl_data->dummy != key.dummy ||
10151                tbl_data->is_transfer != !!key.is_fdb ||
10152                tbl_data->is_egress != !!key.is_egress;
10153 }
10154
10155 /**
10156  * Get a flow table.
10157  *
10158  * @param[in, out] dev
10159  *   Pointer to rte_eth_dev structure.
10160  * @param[in] table_level
10161  *   Table level to use.
10162  * @param[in] egress
10163  *   Direction of the table.
10164  * @param[in] transfer
10165  *   E-Switch or NIC flow.
10166  * @param[in] dummy
10167  *   Dummy entry for dv API.
10168  * @param[in] table_id
10169  *   Table id to use.
10170  * @param[out] error
10171  *   pointer to error structure.
10172  *
10173  * @return
10174  *   Returns tables resource based on the index, NULL in case of failed.
10175  */
10176 struct mlx5_flow_tbl_resource *
10177 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10178                          uint32_t table_level, uint8_t egress,
10179                          uint8_t transfer,
10180                          bool external,
10181                          const struct mlx5_flow_tunnel *tunnel,
10182                          uint32_t group_id, uint8_t dummy,
10183                          uint32_t table_id,
10184                          struct rte_flow_error *error)
10185 {
10186         struct mlx5_priv *priv = dev->data->dev_private;
10187         union mlx5_flow_tbl_key table_key = {
10188                 {
10189                         .level = table_level,
10190                         .id = table_id,
10191                         .reserved = 0,
10192                         .dummy = !!dummy,
10193                         .is_fdb = !!transfer,
10194                         .is_egress = !!egress,
10195                 }
10196         };
10197         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10198                 .tunnel = tunnel,
10199                 .group_id = group_id,
10200                 .external = external,
10201         };
10202         struct mlx5_flow_cb_ctx ctx = {
10203                 .dev = dev,
10204                 .error = error,
10205                 .data = &tt_prm,
10206         };
10207         struct mlx5_hlist_entry *entry;
10208         struct mlx5_flow_tbl_data_entry *tbl_data;
10209
10210         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10211         if (!entry) {
10212                 rte_flow_error_set(error, ENOMEM,
10213                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10214                                    "cannot get table");
10215                 return NULL;
10216         }
10217         DRV_LOG(DEBUG, "table_level %u table_id %u "
10218                 "tunnel %u group %u registered.",
10219                 table_level, table_id,
10220                 tunnel ? tunnel->tunnel_id : 0, group_id);
10221         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10222         return &tbl_data->tbl;
10223 }
10224
/**
 * Hash list callback: release a flow table entry and its HW objects.
 *
 * Destroys the jump action and table object when present, drops the
 * tunnel offload group reference for external tables, then destroys the
 * per-table matcher list and returns the entry to the indexed pool.
 *
 * @param list
 *   Hash list the entry belongs to; its ctx is the shared dev context.
 * @param entry
 *   Hash list entry embedded in the table data entry.
 */
void
flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
		      struct mlx5_hlist_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	MLX5_ASSERT(entry && sh);
	if (tbl_data->jump.action)
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
	if (tbl_data->tbl.obj)
		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
	if (tbl_data->tunnel_offload && tbl_data->external) {
		struct mlx5_hlist_entry *he;
		struct mlx5_hlist *tunnel_grp_hash;
		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
		union tunnel_tbl_key tunnel_key = {
			.tunnel_id = tbl_data->tunnel ?
					tbl_data->tunnel->tunnel_id : 0,
			.group = tbl_data->group_id
		};
		uint32_t table_level = tbl_data->level;

		/* Use the tunnel's own group table, or the global one. */
		tunnel_grp_hash = tbl_data->tunnel ?
					tbl_data->tunnel->groups :
					thub->groups;
		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
		if (he)
			mlx5_hlist_unregister(tunnel_grp_hash, he);
		DRV_LOG(DEBUG,
			"table_level %u id %u tunnel %u group %u released.",
			table_level,
			tbl_data->id,
			tbl_data->tunnel ?
			tbl_data->tunnel->tunnel_id : 0,
			tbl_data->group_id);
	}
	mlx5_list_destroy(tbl_data->matchers);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
10266
10267 /**
10268  * Release a flow table.
10269  *
10270  * @param[in] sh
10271  *   Pointer to device shared structure.
10272  * @param[in] tbl
10273  *   Table resource to be released.
10274  *
10275  * @return
10276  *   Returns 0 if table was released, else return 1;
10277  */
10278 static int
10279 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10280                              struct mlx5_flow_tbl_resource *tbl)
10281 {
10282         struct mlx5_flow_tbl_data_entry *tbl_data =
10283                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10284
10285         if (!tbl)
10286                 return 0;
10287         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10288 }
10289
10290 int
10291 flow_dv_matcher_match_cb(struct mlx5_list *list __rte_unused,
10292                          struct mlx5_list_entry *entry, void *cb_ctx)
10293 {
10294         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10295         struct mlx5_flow_dv_matcher *ref = ctx->data;
10296         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10297                                                         entry);
10298
10299         return cur->crc != ref->crc ||
10300                cur->priority != ref->priority ||
10301                memcmp((const void *)cur->mask.buf,
10302                       (const void *)ref->mask.buf, ref->mask.size);
10303 }
10304
/**
 * List callback: create a flow matcher object for the owning table.
 *
 * @param list
 *   mlx5 list the matcher is created for; its ctx is the shared dev context.
 * @param entry
 *   Unused list entry argument.
 * @param cb_ctx
 *   Pointer to struct mlx5_flow_cb_ctx; ctx->data is the reference matcher.
 *
 * @return
 *   Pointer to the list entry of the new matcher, NULL on failure
 *   (ctx->error is filled).
 */
struct mlx5_list_entry *
flow_dv_matcher_create_cb(struct mlx5_list *list,
			  struct mlx5_list_entry *entry __rte_unused,
			  void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_matcher *ref = ctx->data;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&ref->mask,
	};
	struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
							    typeof(*tbl), tbl);
	int ret;

	resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
			       SOCKET_ID_ANY);
	if (!resource) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	*resource = *ref;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(resource->mask.buf);
	/*
	 * NOTE(review): the size adjustment is applied to the caller's
	 * reference mask (dv_attr.match_mask also points at ref->mask),
	 * not to the copied resource - confirm this asymmetry is intended.
	 */
	__flow_dv_adjust_buf_size(&ref->mask.size,
				  dv_attr.match_criteria_enable);
	dv_attr.priority = ref->priority;
	if (tbl->is_egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
					       &resource->matcher_object);
	if (ret) {
		mlx5_free(resource);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	return &resource->entry;
}
10349
10350 /**
10351  * Register the flow matcher.
10352  *
10353  * @param[in, out] dev
10354  *   Pointer to rte_eth_dev structure.
10355  * @param[in, out] matcher
10356  *   Pointer to flow matcher.
10357  * @param[in, out] key
10358  *   Pointer to flow table key.
10359  * @parm[in, out] dev_flow
10360  *   Pointer to the dev_flow.
10361  * @param[out] error
10362  *   pointer to error structure.
10363  *
10364  * @return
10365  *   0 on success otherwise -errno and errno is set.
10366  */
10367 static int
10368 flow_dv_matcher_register(struct rte_eth_dev *dev,
10369                          struct mlx5_flow_dv_matcher *ref,
10370                          union mlx5_flow_tbl_key *key,
10371                          struct mlx5_flow *dev_flow,
10372                          const struct mlx5_flow_tunnel *tunnel,
10373                          uint32_t group_id,
10374                          struct rte_flow_error *error)
10375 {
10376         struct mlx5_list_entry *entry;
10377         struct mlx5_flow_dv_matcher *resource;
10378         struct mlx5_flow_tbl_resource *tbl;
10379         struct mlx5_flow_tbl_data_entry *tbl_data;
10380         struct mlx5_flow_cb_ctx ctx = {
10381                 .error = error,
10382                 .data = ref,
10383         };
10384         /**
10385          * tunnel offload API requires this registration for cases when
10386          * tunnel match rule was inserted before tunnel set rule.
10387          */
10388         tbl = flow_dv_tbl_resource_get(dev, key->level,
10389                                        key->is_egress, key->is_fdb,
10390                                        dev_flow->external, tunnel,
10391                                        group_id, 0, key->id, error);
10392         if (!tbl)
10393                 return -rte_errno;      /* No need to refill the error info */
10394         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10395         ref->tbl = tbl;
10396         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10397         if (!entry) {
10398                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10399                 return rte_flow_error_set(error, ENOMEM,
10400                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10401                                           "cannot allocate ref memory");
10402         }
10403         resource = container_of(entry, typeof(*resource), entry);
10404         dev_flow->handle->dvh.matcher = resource;
10405         return 0;
10406 }
10407
10408 struct mlx5_hlist_entry *
10409 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
10410 {
10411         struct mlx5_dev_ctx_shared *sh = list->ctx;
10412         struct rte_flow_error *error = ctx;
10413         struct mlx5_flow_dv_tag_resource *entry;
10414         uint32_t idx = 0;
10415         int ret;
10416
10417         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10418         if (!entry) {
10419                 rte_flow_error_set(error, ENOMEM,
10420                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10421                                    "cannot allocate resource memory");
10422                 return NULL;
10423         }
10424         entry->idx = idx;
10425         entry->tag_id = key;
10426         ret = mlx5_flow_os_create_flow_action_tag(key,
10427                                                   &entry->action);
10428         if (ret) {
10429                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10430                 rte_flow_error_set(error, ENOMEM,
10431                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10432                                    NULL, "cannot create action");
10433                 return NULL;
10434         }
10435         return &entry->entry;
10436 }
10437
10438 int
10439 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
10440                      struct mlx5_hlist_entry *entry, uint64_t key,
10441                      void *cb_ctx __rte_unused)
10442 {
10443         struct mlx5_flow_dv_tag_resource *tag =
10444                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10445
10446         return key != tag->tag_id;
10447 }
10448
10449 /**
10450  * Find existing tag resource or create and register a new one.
10451  *
10452  * @param dev[in, out]
10453  *   Pointer to rte_eth_dev structure.
10454  * @param[in, out] tag_be24
10455  *   Tag value in big endian then R-shift 8.
10456  * @parm[in, out] dev_flow
10457  *   Pointer to the dev_flow.
10458  * @param[out] error
10459  *   pointer to error structure.
10460  *
10461  * @return
10462  *   0 on success otherwise -errno and errno is set.
10463  */
10464 static int
10465 flow_dv_tag_resource_register
10466                         (struct rte_eth_dev *dev,
10467                          uint32_t tag_be24,
10468                          struct mlx5_flow *dev_flow,
10469                          struct rte_flow_error *error)
10470 {
10471         struct mlx5_priv *priv = dev->data->dev_private;
10472         struct mlx5_flow_dv_tag_resource *resource;
10473         struct mlx5_hlist_entry *entry;
10474
10475         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
10476         if (entry) {
10477                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10478                                         entry);
10479                 dev_flow->handle->dvh.rix_tag = resource->idx;
10480                 dev_flow->dv.tag_resource = resource;
10481                 return 0;
10482         }
10483         return -rte_errno;
10484 }
10485
/**
 * Hash list callback: destroy a tag action and free its pool entry.
 *
 * @param list
 *   Hash list the tag belongs to; its ctx is the shared dev context.
 * @param entry
 *   Hash list entry embedded in the tag resource.
 */
void
flow_dv_tag_remove_cb(struct mlx5_hlist *list,
		      struct mlx5_hlist_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_dv_tag_resource *tag =
		container_of(entry, struct mlx5_flow_dv_tag_resource, entry);

	MLX5_ASSERT(tag && sh && tag->action);
	claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
	DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
}
10499
10500 /**
10501  * Release the tag.
10502  *
10503  * @param dev
10504  *   Pointer to Ethernet device.
10505  * @param tag_idx
10506  *   Tag index.
10507  *
10508  * @return
10509  *   1 while a reference on it exists, 0 when freed.
10510  */
10511 static int
10512 flow_dv_tag_release(struct rte_eth_dev *dev,
10513                     uint32_t tag_idx)
10514 {
10515         struct mlx5_priv *priv = dev->data->dev_private;
10516         struct mlx5_flow_dv_tag_resource *tag;
10517
10518         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10519         if (!tag)
10520                 return 0;
10521         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10522                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10523         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10524 }
10525
10526 /**
10527  * Translate port ID action to vport.
10528  *
10529  * @param[in] dev
10530  *   Pointer to rte_eth_dev structure.
10531  * @param[in] action
10532  *   Pointer to the port ID action.
10533  * @param[out] dst_port_id
10534  *   The target port ID.
10535  * @param[out] error
10536  *   Pointer to the error structure.
10537  *
10538  * @return
10539  *   0 on success, a negative errno value otherwise and rte_errno is set.
10540  */
10541 static int
10542 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10543                                  const struct rte_flow_action *action,
10544                                  uint32_t *dst_port_id,
10545                                  struct rte_flow_error *error)
10546 {
10547         uint32_t port;
10548         struct mlx5_priv *priv;
10549         const struct rte_flow_action_port_id *conf =
10550                         (const struct rte_flow_action_port_id *)action->conf;
10551
10552         port = conf->original ? dev->data->port_id : conf->id;
10553         priv = mlx5_port_to_eswitch_info(port, false);
10554         if (!priv)
10555                 return rte_flow_error_set(error, -rte_errno,
10556                                           RTE_FLOW_ERROR_TYPE_ACTION,
10557                                           NULL,
10558                                           "No eswitch info was found for port");
10559 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10560         /*
10561          * This parameter is transferred to
10562          * mlx5dv_dr_action_create_dest_ib_port().
10563          */
10564         *dst_port_id = priv->dev_port;
10565 #else
10566         /*
10567          * Legacy mode, no LAG configurations is supported.
10568          * This parameter is transferred to
10569          * mlx5dv_dr_action_create_dest_vport().
10570          */
10571         *dst_port_id = priv->vport_id;
10572 #endif
10573         return 0;
10574 }
10575
10576 /**
10577  * Create a counter with aging configuration.
10578  *
10579  * @param[in] dev
10580  *   Pointer to rte_eth_dev structure.
10581  * @param[in] dev_flow
10582  *   Pointer to the mlx5_flow.
10583  * @param[out] count
10584  *   Pointer to the counter action configuration.
10585  * @param[in] age
10586  *   Pointer to the aging action configuration.
10587  *
10588  * @return
10589  *   Index to flow counter on success, 0 otherwise.
10590  */
10591 static uint32_t
10592 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10593                                 struct mlx5_flow *dev_flow,
10594                                 const struct rte_flow_action_count *count,
10595                                 const struct rte_flow_action_age *age)
10596 {
10597         uint32_t counter;
10598         struct mlx5_age_param *age_param;
10599
10600         if (count && count->shared)
10601                 counter = flow_dv_counter_get_shared(dev, count->id);
10602         else
10603                 counter = flow_dv_counter_alloc(dev, !!age);
10604         if (!counter || age == NULL)
10605                 return counter;
10606         age_param = flow_dv_counter_idx_get_age(dev, counter);
10607         age_param->context = age->context ? age->context :
10608                 (void *)(uintptr_t)(dev_flow->flow_idx);
10609         age_param->timeout = age->timeout;
10610         age_param->port_id = dev->data->port_id;
10611         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10612         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10613         return counter;
10614 }
10615
10616 /**
10617  * Add Tx queue matcher
10618  *
10619  * @param[in] dev
10620  *   Pointer to the dev struct.
10621  * @param[in, out] matcher
10622  *   Flow matcher.
10623  * @param[in, out] key
10624  *   Flow matcher value.
10625  * @param[in] item
10626  *   Flow pattern to translate.
10627  * @param[in] inner
10628  *   Item is inner pattern.
10629  */
10630 static void
10631 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10632                                 void *matcher, void *key,
10633                                 const struct rte_flow_item *item)
10634 {
10635         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10636         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10637         void *misc_m =
10638                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10639         void *misc_v =
10640                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10641         struct mlx5_txq_ctrl *txq;
10642         uint32_t queue;
10643
10644
10645         queue_m = (const void *)item->mask;
10646         if (!queue_m)
10647                 return;
10648         queue_v = (const void *)item->spec;
10649         if (!queue_v)
10650                 return;
10651         txq = mlx5_txq_get(dev, queue_v->queue);
10652         if (!txq)
10653                 return;
10654         queue = txq->obj->sq->id;
10655         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10656         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10657                  queue & queue_m->queue);
10658         mlx5_txq_release(dev, queue_v->queue);
10659 }
10660
10661 /**
10662  * Set the hash fields according to the @p flow information.
10663  *
10664  * @param[in] dev_flow
10665  *   Pointer to the mlx5_flow.
10666  * @param[in] rss_desc
10667  *   Pointer to the mlx5_flow_rss_desc.
10668  */
10669 static void
10670 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10671                        struct mlx5_flow_rss_desc *rss_desc)
10672 {
10673         uint64_t items = dev_flow->handle->layers;
10674         int rss_inner = 0;
10675         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10676
10677         dev_flow->hash_fields = 0;
10678 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10679         if (rss_desc->level >= 2) {
10680                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10681                 rss_inner = 1;
10682         }
10683 #endif
10684         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10685             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10686                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10687                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10688                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10689                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10690                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10691                         else
10692                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10693                 }
10694         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10695                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10696                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10697                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10698                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10699                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10700                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10701                         else
10702                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10703                 }
10704         }
10705         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10706             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10707                 if (rss_types & ETH_RSS_UDP) {
10708                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10709                                 dev_flow->hash_fields |=
10710                                                 IBV_RX_HASH_SRC_PORT_UDP;
10711                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10712                                 dev_flow->hash_fields |=
10713                                                 IBV_RX_HASH_DST_PORT_UDP;
10714                         else
10715                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10716                 }
10717         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10718                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10719                 if (rss_types & ETH_RSS_TCP) {
10720                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10721                                 dev_flow->hash_fields |=
10722                                                 IBV_RX_HASH_SRC_PORT_TCP;
10723                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10724                                 dev_flow->hash_fields |=
10725                                                 IBV_RX_HASH_DST_PORT_TCP;
10726                         else
10727                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10728                 }
10729         }
10730 }
10731
10732 /**
10733  * Prepare an Rx Hash queue.
10734  *
10735  * @param dev
10736  *   Pointer to Ethernet device.
10737  * @param[in] dev_flow
10738  *   Pointer to the mlx5_flow.
10739  * @param[in] rss_desc
10740  *   Pointer to the mlx5_flow_rss_desc.
10741  * @param[out] hrxq_idx
10742  *   Hash Rx queue index.
10743  *
10744  * @return
10745  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10746  */
10747 static struct mlx5_hrxq *
10748 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10749                      struct mlx5_flow *dev_flow,
10750                      struct mlx5_flow_rss_desc *rss_desc,
10751                      uint32_t *hrxq_idx)
10752 {
10753         struct mlx5_priv *priv = dev->data->dev_private;
10754         struct mlx5_flow_handle *dh = dev_flow->handle;
10755         struct mlx5_hrxq *hrxq;
10756
10757         MLX5_ASSERT(rss_desc->queue_num);
10758         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10759         rss_desc->hash_fields = dev_flow->hash_fields;
10760         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10761         rss_desc->shared_rss = 0;
10762         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10763         if (!*hrxq_idx)
10764                 return NULL;
10765         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10766                               *hrxq_idx);
10767         return hrxq;
10768 }
10769
10770 /**
10771  * Release sample sub action resource.
10772  *
10773  * @param[in, out] dev
10774  *   Pointer to rte_eth_dev structure.
10775  * @param[in] act_res
10776  *   Pointer to sample sub action resource.
10777  */
10778 static void
10779 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10780                                    struct mlx5_flow_sub_actions_idx *act_res)
10781 {
10782         if (act_res->rix_hrxq) {
10783                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10784                 act_res->rix_hrxq = 0;
10785         }
10786         if (act_res->rix_encap_decap) {
10787                 flow_dv_encap_decap_resource_release(dev,
10788                                                      act_res->rix_encap_decap);
10789                 act_res->rix_encap_decap = 0;
10790         }
10791         if (act_res->rix_port_id_action) {
10792                 flow_dv_port_id_action_resource_release(dev,
10793                                                 act_res->rix_port_id_action);
10794                 act_res->rix_port_id_action = 0;
10795         }
10796         if (act_res->rix_tag) {
10797                 flow_dv_tag_release(dev, act_res->rix_tag);
10798                 act_res->rix_tag = 0;
10799         }
10800         if (act_res->rix_jump) {
10801                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10802                 act_res->rix_jump = 0;
10803         }
10804 }
10805
10806 int
10807 flow_dv_sample_match_cb(struct mlx5_list *list __rte_unused,
10808                         struct mlx5_list_entry *entry, void *cb_ctx)
10809 {
10810         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10811         struct rte_eth_dev *dev = ctx->dev;
10812         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
10813         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
10814                                                               typeof(*resource),
10815                                                               entry);
10816
10817         if (ctx_resource->ratio == resource->ratio &&
10818             ctx_resource->ft_type == resource->ft_type &&
10819             ctx_resource->ft_id == resource->ft_id &&
10820             ctx_resource->set_action == resource->set_action &&
10821             !memcmp((void *)&ctx_resource->sample_act,
10822                     (void *)&resource->sample_act,
10823                     sizeof(struct mlx5_flow_sub_actions_list))) {
10824                 /*
10825                  * Existing sample action should release the prepared
10826                  * sub-actions reference counter.
10827                  */
10828                 flow_dv_sample_sub_actions_release(dev,
10829                                                    &ctx_resource->sample_idx);
10830                 return 0;
10831         }
10832         return 1;
10833 }
10834
/*
 * List-create callback: allocate and build a new DR sample action
 * resource from the reference stored in @cb_ctx. Returns the list entry
 * of the new resource, or NULL with ctx->error set.
 */
struct mlx5_list_entry *
flow_dv_sample_create_cb(struct mlx5_list *list __rte_unused,
			 struct mlx5_list_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
	void **sample_dv_actions = ctx_resource->sub_actions;
	struct mlx5_flow_dv_sample_resource *resource;
	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	uint32_t idx = 0;
	/* Non-sampled traffic continues on the next table level. */
	const uint32_t next_ft_step = 1;
	uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
	uint8_t is_egress = 0;
	uint8_t is_transfer = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new sample resource. */
	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
	if (!resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	/* Start from the reference; per-instance fields fixed up below. */
	*resource = *ctx_resource;
	/* Create normal path table level */
	if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		is_transfer = 1;
	else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		is_egress = 1;
	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
					is_egress, is_transfer,
					true, NULL, 0, 0, 0, error);
	if (!tbl) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "fail to create normal path table "
					  "for sample");
		goto error;
	}
	resource->normal_path_tbl = tbl;
	if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		/*
		 * FDB sampling additionally needs the shared default miss
		 * action appended to the sampled action list.
		 */
		if (!sh->default_miss_action) {
			rte_flow_error_set(error, ENOMEM,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"default miss action was not "
						"created");
			goto error;
		}
		sample_dv_actions[ctx_resource->sample_act.actions_num++] =
						sh->default_miss_action;
	}
	/* Create a DR sample action */
	sampler_attr.sample_ratio = resource->ratio;
	sampler_attr.default_next_table = tbl->obj;
	sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
							&sample_dv_actions[0];
	sampler_attr.action = resource->set_action;
	if (mlx5_os_flow_dr_create_flow_action_sampler
			(&sampler_attr, &resource->verbs_action)) {
		rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create sample action");
		goto error;
	}
	resource->idx = idx;
	resource->dev = dev;
	return &resource->entry;
error:
	/*
	 * NOTE(review): sub-action references are released here only for
	 * non-FDB tables — presumably the FDB case keeps them for the
	 * caller's cleanup path; confirm against the callers.
	 */
	if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
		flow_dv_sample_sub_actions_release(dev,
						   &resource->sample_idx);
	if (resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
				resource->normal_path_tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
	return NULL;

}
10923
10924 struct mlx5_list_entry *
10925 flow_dv_sample_clone_cb(struct mlx5_list *list __rte_unused,
10926                          struct mlx5_list_entry *entry __rte_unused,
10927                          void *cb_ctx)
10928 {
10929         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10930         struct rte_eth_dev *dev = ctx->dev;
10931         struct mlx5_flow_dv_sample_resource *resource;
10932         struct mlx5_priv *priv = dev->data->dev_private;
10933         struct mlx5_dev_ctx_shared *sh = priv->sh;
10934         uint32_t idx = 0;
10935
10936         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10937         if (!resource) {
10938                 rte_flow_error_set(ctx->error, ENOMEM,
10939                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10940                                           NULL,
10941                                           "cannot allocate resource memory");
10942                 return NULL;
10943         }
10944         memcpy(resource, entry, sizeof(*resource));
10945         resource->idx = idx;
10946         resource->dev = dev;
10947         return &resource->entry;
10948 }
10949
10950 void
10951 flow_dv_sample_clone_free_cb(struct mlx5_list *list __rte_unused,
10952                          struct mlx5_list_entry *entry)
10953 {
10954         struct mlx5_flow_dv_sample_resource *resource =
10955                         container_of(entry, typeof(*resource), entry);
10956         struct rte_eth_dev *dev = resource->dev;
10957         struct mlx5_priv *priv = dev->data->dev_private;
10958
10959         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10960                         resource->idx);
10961 }
10962
10963 /**
10964  * Find existing sample resource or create and register a new one.
10965  *
10966  * @param[in, out] dev
10967  *   Pointer to rte_eth_dev structure.
10968  * @param[in] ref
10969  *   Pointer to sample resource reference.
 * @param[in, out] dev_flow
10971  *   Pointer to the dev_flow.
10972  * @param[out] error
10973  *   pointer to error structure.
10974  *
10975  * @return
10976  *   0 on success otherwise -errno and errno is set.
10977  */
10978 static int
10979 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10980                          struct mlx5_flow_dv_sample_resource *ref,
10981                          struct mlx5_flow *dev_flow,
10982                          struct rte_flow_error *error)
10983 {
10984         struct mlx5_flow_dv_sample_resource *resource;
10985         struct mlx5_list_entry *entry;
10986         struct mlx5_priv *priv = dev->data->dev_private;
10987         struct mlx5_flow_cb_ctx ctx = {
10988                 .dev = dev,
10989                 .error = error,
10990                 .data = ref,
10991         };
10992
10993         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
10994         if (!entry)
10995                 return -rte_errno;
10996         resource = container_of(entry, typeof(*resource), entry);
10997         dev_flow->handle->dvh.rix_sample = resource->idx;
10998         dev_flow->dv.sample_res = resource;
10999         return 0;
11000 }
11001
11002 int
11003 flow_dv_dest_array_match_cb(struct mlx5_list *list __rte_unused,
11004                             struct mlx5_list_entry *entry, void *cb_ctx)
11005 {
11006         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11007         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11008         struct rte_eth_dev *dev = ctx->dev;
11009         struct mlx5_flow_dv_dest_array_resource *resource =
11010                         container_of(entry, typeof(*resource), entry);
11011         uint32_t idx = 0;
11012
11013         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11014             ctx_resource->ft_type == resource->ft_type &&
11015             !memcmp((void *)resource->sample_act,
11016                     (void *)ctx_resource->sample_act,
11017                    (ctx_resource->num_of_dest *
11018                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11019                 /*
11020                  * Existing sample action should release the prepared
11021                  * sub-actions reference counter.
11022                  */
11023                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11024                         flow_dv_sample_sub_actions_release(dev,
11025                                         &ctx_resource->sample_idx[idx]);
11026                 return 0;
11027         }
11028         return 1;
11029 }
11030
/*
 * List-create callback: allocate and build a new DR destination-array
 * action from the reference stored in @cb_ctx. Returns the list entry
 * of the new resource, or NULL with ctx->error set.
 */
struct mlx5_list_entry *
flow_dv_dest_array_create_cb(struct mlx5_list *list __rte_unused,
			 struct mlx5_list_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_dest_array_resource *resource;
	struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx = 0, res_idx = 0;
	struct rte_flow_error *error = ctx->error;
	uint64_t action_flags;
	int ret;

	/* Register new destination array resource. */
	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
					    &res_idx);
	if (!resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*resource = *ctx_resource;
	/* Pick the DR domain matching the flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Build one destination attribute per sub-action list entry. */
	for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
				 mlx5_malloc(MLX5_MEM_ZERO,
				 sizeof(struct mlx5dv_dr_action_dest_attr),
				 0, SOCKET_ID_ANY);
		if (!dest_attr[idx]) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot allocate resource memory");
			goto error;
		}
		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
		sample_act = &ctx_resource->sample_act[idx];
		action_flags = sample_act->action_flags;
		/* Exact flag combination selects the destination kind. */
		switch (action_flags) {
		case MLX5_FLOW_ACTION_QUEUE:
			dest_attr[idx]->dest = sample_act->dr_queue_action;
			break;
		case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
			/* Encap + port: reformat before forwarding. */
			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
			dest_attr[idx]->dest_reformat->reformat =
					sample_act->dr_encap_action;
			dest_attr[idx]->dest_reformat->dest =
					sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_PORT_ID:
			dest_attr[idx]->dest = sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_JUMP:
			dest_attr[idx]->dest = sample_act->dr_jump_action;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   NULL,
					   "unsupported actions type");
			goto error;
		}
	}
	/* create a dest array action */
	ret = mlx5_os_flow_dr_create_flow_action_dest_array
						(domain,
						 resource->num_of_dest,
						 dest_attr,
						 &resource->action);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create destination array action");
		goto error;
	}
	resource->idx = res_idx;
	resource->dev = dev;
	/* Attributes were only needed for creation; free them now. */
	for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
		mlx5_free(dest_attr[idx]);
	return &resource->entry;
error:
	/* Undo sub-action references and partial attribute allocations. */
	for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
		flow_dv_sample_sub_actions_release(dev,
						   &resource->sample_idx[idx]);
		if (dest_attr[idx])
			mlx5_free(dest_attr[idx]);
	}
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
	return NULL;
}
11137
11138 struct mlx5_list_entry *
11139 flow_dv_dest_array_clone_cb(struct mlx5_list *list __rte_unused,
11140                          struct mlx5_list_entry *entry __rte_unused,
11141                          void *cb_ctx)
11142 {
11143         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11144         struct rte_eth_dev *dev = ctx->dev;
11145         struct mlx5_flow_dv_dest_array_resource *resource;
11146         struct mlx5_priv *priv = dev->data->dev_private;
11147         struct mlx5_dev_ctx_shared *sh = priv->sh;
11148         uint32_t res_idx = 0;
11149         struct rte_flow_error *error = ctx->error;
11150
11151         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11152                                       &res_idx);
11153         if (!resource) {
11154                 rte_flow_error_set(error, ENOMEM,
11155                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11156                                           NULL,
11157                                           "cannot allocate dest-array memory");
11158                 return NULL;
11159         }
11160         memcpy(resource, entry, sizeof(*resource));
11161         resource->idx = res_idx;
11162         resource->dev = dev;
11163         return &resource->entry;
11164 }
11165
11166 void
11167 flow_dv_dest_array_clone_free_cb(struct mlx5_list *list __rte_unused,
11168                              struct mlx5_list_entry *entry)
11169 {
11170         struct mlx5_flow_dv_dest_array_resource *resource =
11171                         container_of(entry, typeof(*resource), entry);
11172         struct rte_eth_dev *dev = resource->dev;
11173         struct mlx5_priv *priv = dev->data->dev_private;
11174
11175         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11176 }
11177
11178 /**
11179  * Find existing destination array resource or create and register a new one.
11180  *
11181  * @param[in, out] dev
11182  *   Pointer to rte_eth_dev structure.
11183  * @param[in] ref
11184  *   Pointer to destination array resource reference.
 * @param[in, out] dev_flow
11186  *   Pointer to the dev_flow.
11187  * @param[out] error
11188  *   pointer to error structure.
11189  *
11190  * @return
11191  *   0 on success otherwise -errno and errno is set.
11192  */
11193 static int
11194 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11195                          struct mlx5_flow_dv_dest_array_resource *ref,
11196                          struct mlx5_flow *dev_flow,
11197                          struct rte_flow_error *error)
11198 {
11199         struct mlx5_flow_dv_dest_array_resource *resource;
11200         struct mlx5_priv *priv = dev->data->dev_private;
11201         struct mlx5_list_entry *entry;
11202         struct mlx5_flow_cb_ctx ctx = {
11203                 .dev = dev,
11204                 .error = error,
11205                 .data = ref,
11206         };
11207
11208         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11209         if (!entry)
11210                 return -rte_errno;
11211         resource = container_of(entry, typeof(*resource), entry);
11212         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11213         dev_flow->dv.dest_array_res = resource;
11214         return 0;
11215 }
11216
11217 /**
11218  * Convert Sample action to DV specification.
11219  *
11220  * @param[in] dev
11221  *   Pointer to rte_eth_dev structure.
11222  * @param[in] action
11223  *   Pointer to sample action structure.
11224  * @param[in, out] dev_flow
11225  *   Pointer to the mlx5_flow.
11226  * @param[in] attr
11227  *   Pointer to the flow attributes.
11228  * @param[in, out] num_of_dest
11229  *   Pointer to the num of destination.
11230  * @param[in, out] sample_actions
11231  *   Pointer to sample actions list.
11232  * @param[in, out] res
11233  *   Pointer to sample resource.
11234  * @param[out] error
11235  *   Pointer to the error structure.
11236  *
11237  * @return
11238  *   0 on success, a negative errno value otherwise and rte_errno is set.
11239  */
11240 static int
11241 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11242                                 const struct rte_flow_action_sample *action,
11243                                 struct mlx5_flow *dev_flow,
11244                                 const struct rte_flow_attr *attr,
11245                                 uint32_t *num_of_dest,
11246                                 void **sample_actions,
11247                                 struct mlx5_flow_dv_sample_resource *res,
11248                                 struct rte_flow_error *error)
11249 {
11250         struct mlx5_priv *priv = dev->data->dev_private;
11251         const struct rte_flow_action *sub_actions;
11252         struct mlx5_flow_sub_actions_list *sample_act;
11253         struct mlx5_flow_sub_actions_idx *sample_idx;
11254         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11255         struct rte_flow *flow = dev_flow->flow;
11256         struct mlx5_flow_rss_desc *rss_desc;
11257         uint64_t action_flags = 0;
11258
11259         MLX5_ASSERT(wks);
11260         rss_desc = &wks->rss_desc;
11261         sample_act = &res->sample_act;
11262         sample_idx = &res->sample_idx;
11263         res->ratio = action->ratio;
11264         sub_actions = action->actions;
11265         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11266                 int type = sub_actions->type;
11267                 uint32_t pre_rix = 0;
11268                 void *pre_r;
11269                 switch (type) {
11270                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11271                 {
11272                         const struct rte_flow_action_queue *queue;
11273                         struct mlx5_hrxq *hrxq;
11274                         uint32_t hrxq_idx;
11275
11276                         queue = sub_actions->conf;
11277                         rss_desc->queue_num = 1;
11278                         rss_desc->queue[0] = queue->index;
11279                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11280                                                     rss_desc, &hrxq_idx);
11281                         if (!hrxq)
11282                                 return rte_flow_error_set
11283                                         (error, rte_errno,
11284                                          RTE_FLOW_ERROR_TYPE_ACTION,
11285                                          NULL,
11286                                          "cannot create fate queue");
11287                         sample_act->dr_queue_action = hrxq->action;
11288                         sample_idx->rix_hrxq = hrxq_idx;
11289                         sample_actions[sample_act->actions_num++] =
11290                                                 hrxq->action;
11291                         (*num_of_dest)++;
11292                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11293                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11294                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11295                         dev_flow->handle->fate_action =
11296                                         MLX5_FLOW_FATE_QUEUE;
11297                         break;
11298                 }
11299                 case RTE_FLOW_ACTION_TYPE_RSS:
11300                 {
11301                         struct mlx5_hrxq *hrxq;
11302                         uint32_t hrxq_idx;
11303                         const struct rte_flow_action_rss *rss;
11304                         const uint8_t *rss_key;
11305
11306                         rss = sub_actions->conf;
11307                         memcpy(rss_desc->queue, rss->queue,
11308                                rss->queue_num * sizeof(uint16_t));
11309                         rss_desc->queue_num = rss->queue_num;
11310                         /* NULL RSS key indicates default RSS key. */
11311                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11312                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11313                         /*
11314                          * rss->level and rss.types should be set in advance
11315                          * when expanding items for RSS.
11316                          */
11317                         flow_dv_hashfields_set(dev_flow, rss_desc);
11318                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11319                                                     rss_desc, &hrxq_idx);
11320                         if (!hrxq)
11321                                 return rte_flow_error_set
11322                                         (error, rte_errno,
11323                                          RTE_FLOW_ERROR_TYPE_ACTION,
11324                                          NULL,
11325                                          "cannot create fate queue");
11326                         sample_act->dr_queue_action = hrxq->action;
11327                         sample_idx->rix_hrxq = hrxq_idx;
11328                         sample_actions[sample_act->actions_num++] =
11329                                                 hrxq->action;
11330                         (*num_of_dest)++;
11331                         action_flags |= MLX5_FLOW_ACTION_RSS;
11332                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11333                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11334                         dev_flow->handle->fate_action =
11335                                         MLX5_FLOW_FATE_QUEUE;
11336                         break;
11337                 }
11338                 case RTE_FLOW_ACTION_TYPE_MARK:
11339                 {
11340                         uint32_t tag_be = mlx5_flow_mark_set
11341                                 (((const struct rte_flow_action_mark *)
11342                                 (sub_actions->conf))->id);
11343
11344                         dev_flow->handle->mark = 1;
11345                         pre_rix = dev_flow->handle->dvh.rix_tag;
11346                         /* Save the mark resource before sample */
11347                         pre_r = dev_flow->dv.tag_resource;
11348                         if (flow_dv_tag_resource_register(dev, tag_be,
11349                                                   dev_flow, error))
11350                                 return -rte_errno;
11351                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11352                         sample_act->dr_tag_action =
11353                                 dev_flow->dv.tag_resource->action;
11354                         sample_idx->rix_tag =
11355                                 dev_flow->handle->dvh.rix_tag;
11356                         sample_actions[sample_act->actions_num++] =
11357                                                 sample_act->dr_tag_action;
11358                         /* Recover the mark resource after sample */
11359                         dev_flow->dv.tag_resource = pre_r;
11360                         dev_flow->handle->dvh.rix_tag = pre_rix;
11361                         action_flags |= MLX5_FLOW_ACTION_MARK;
11362                         break;
11363                 }
11364                 case RTE_FLOW_ACTION_TYPE_COUNT:
11365                 {
11366                         if (!flow->counter) {
11367                                 flow->counter =
11368                                         flow_dv_translate_create_counter(dev,
11369                                                 dev_flow, sub_actions->conf,
11370                                                 0);
11371                                 if (!flow->counter)
11372                                         return rte_flow_error_set
11373                                                 (error, rte_errno,
11374                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11375                                                 NULL,
11376                                                 "cannot create counter"
11377                                                 " object.");
11378                         }
11379                         sample_act->dr_cnt_action =
11380                                   (flow_dv_counter_get_by_idx(dev,
11381                                   flow->counter, NULL))->action;
11382                         sample_actions[sample_act->actions_num++] =
11383                                                 sample_act->dr_cnt_action;
11384                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11385                         break;
11386                 }
11387                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11388                 {
11389                         struct mlx5_flow_dv_port_id_action_resource
11390                                         port_id_resource;
11391                         uint32_t port_id = 0;
11392
11393                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11394                         /* Save the port id resource before sample */
11395                         pre_rix = dev_flow->handle->rix_port_id_action;
11396                         pre_r = dev_flow->dv.port_id_action;
11397                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11398                                                              &port_id, error))
11399                                 return -rte_errno;
11400                         port_id_resource.port_id = port_id;
11401                         if (flow_dv_port_id_action_resource_register
11402                             (dev, &port_id_resource, dev_flow, error))
11403                                 return -rte_errno;
11404                         sample_act->dr_port_id_action =
11405                                 dev_flow->dv.port_id_action->action;
11406                         sample_idx->rix_port_id_action =
11407                                 dev_flow->handle->rix_port_id_action;
11408                         sample_actions[sample_act->actions_num++] =
11409                                                 sample_act->dr_port_id_action;
11410                         /* Recover the port id resource after sample */
11411                         dev_flow->dv.port_id_action = pre_r;
11412                         dev_flow->handle->rix_port_id_action = pre_rix;
11413                         (*num_of_dest)++;
11414                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11415                         break;
11416                 }
11417                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11418                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11419                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11420                         /* Save the encap resource before sample */
11421                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11422                         pre_r = dev_flow->dv.encap_decap;
11423                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11424                                                            dev_flow,
11425                                                            attr->transfer,
11426                                                            error))
11427                                 return -rte_errno;
11428                         sample_act->dr_encap_action =
11429                                 dev_flow->dv.encap_decap->action;
11430                         sample_idx->rix_encap_decap =
11431                                 dev_flow->handle->dvh.rix_encap_decap;
11432                         sample_actions[sample_act->actions_num++] =
11433                                                 sample_act->dr_encap_action;
11434                         /* Recover the encap resource after sample */
11435                         dev_flow->dv.encap_decap = pre_r;
11436                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11437                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11438                         break;
11439                 default:
11440                         return rte_flow_error_set(error, EINVAL,
11441                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11442                                 NULL,
11443                                 "Not support for sampler action");
11444                 }
11445         }
11446         sample_act->action_flags = action_flags;
11447         res->ft_id = dev_flow->dv.group;
11448         if (attr->transfer) {
11449                 union {
11450                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11451                         uint64_t set_action;
11452                 } action_ctx = { .set_action = 0 };
11453
11454                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11455                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11456                          MLX5_MODIFICATION_TYPE_SET);
11457                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11458                          MLX5_MODI_META_REG_C_0);
11459                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11460                          priv->vport_meta_tag);
11461                 res->set_action = action_ctx.set_action;
11462         } else if (attr->ingress) {
11463                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11464         } else {
11465                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11466         }
11467         return 0;
11468 }
11469
/**
 * Convert Sample action to DV specification.
 *
 * Finalizes the normal-path (non-sample) fate actions collected in
 * @p sample_act and registers either a destination-array resource (mirroring,
 * more than one destination) or a plain sample resource (single destination).
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] num_of_dest
 *   The num of destination.
 * @param[in, out] res
 *   Pointer to sample resource.
 * @param[in, out] mdest_res
 *   Pointer to destination array resource.
 * @param[in] sample_actions
 *   Pointer to sample path actions list.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
			     struct mlx5_flow *dev_flow,
			     uint32_t num_of_dest,
			     struct mlx5_flow_dv_sample_resource *res,
			     struct mlx5_flow_dv_dest_array_resource *mdest_res,
			     void **sample_actions,
			     uint64_t action_flags,
			     struct rte_flow_error *error)
{
	/* update normal path action resource into last index of array */
	uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
	struct mlx5_flow_sub_actions_list *sample_act =
					&mdest_res->sample_act[dest_index];
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	uint32_t normal_idx = 0;
	struct mlx5_hrxq *hrxq;
	uint32_t hrxq_idx;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (num_of_dest > 1) {
		/*
		 * Mirroring: move each normal-path fate resource index out of
		 * the flow handle and into the dest-array slot so the dest
		 * array owns the reference from here on (handle index is
		 * zeroed for encap/port-id/jump below).
		 */
		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
			/* Handle QP action for mirroring */
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
				     (error, rte_errno,
				      RTE_FLOW_ERROR_TYPE_ACTION,
				      NULL,
				      "cannot create rx queue");
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
			sample_act->dr_queue_action = hrxq->action;
			/*
			 * NOTE(review): the hrxq index is kept on the handle
			 * only when MARK is present — presumably so the mark
			 * cleanup path can release it; confirm against the
			 * flow destroy path.
			 */
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			/* Ownership moved to the dest array. */
			dev_flow->handle->dvh.rix_encap_decap = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			/* Ownership moved to the dest array. */
			dev_flow->handle->rix_port_id_action = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_jump =
				dev_flow->handle->rix_jump;
			sample_act->dr_jump_action =
				dev_flow->dv.jump->action;
			/* Ownership moved to the dest array. */
			dev_flow->handle->rix_jump = 0;
		}
		sample_act->actions_num = normal_idx;
		/* update sample action resource into first index of array */
		mdest_res->ft_type = res->ft_type;
		memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
				sizeof(struct mlx5_flow_sub_actions_idx));
		memcpy(&mdest_res->sample_act[0], &res->sample_act,
				sizeof(struct mlx5_flow_sub_actions_list));
		mdest_res->num_of_dest = num_of_dest;
		if (flow_dv_dest_array_resource_register(dev, mdest_res,
							 dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "can't create sample "
						  "action");
	} else {
		/* Single destination: register a plain sample resource. */
		res->sub_actions = sample_actions;
		if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "can't create sample action");
	}
	return 0;
}
11581
11582 /**
11583  * Remove an ASO age action from age actions list.
11584  *
11585  * @param[in] dev
11586  *   Pointer to the Ethernet device structure.
11587  * @param[in] age
11588  *   Pointer to the aso age action handler.
11589  */
11590 static void
11591 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11592                                 struct mlx5_aso_age_action *age)
11593 {
11594         struct mlx5_age_info *age_info;
11595         struct mlx5_age_param *age_param = &age->age_params;
11596         struct mlx5_priv *priv = dev->data->dev_private;
11597         uint16_t expected = AGE_CANDIDATE;
11598
11599         age_info = GET_PORT_AGE_INFO(priv);
11600         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11601                                          AGE_FREE, false, __ATOMIC_RELAXED,
11602                                          __ATOMIC_RELAXED)) {
11603                 /**
11604                  * We need the lock even it is age timeout,
11605                  * since age action may still in process.
11606                  */
11607                 rte_spinlock_lock(&age_info->aged_sl);
11608                 LIST_REMOVE(age, next);
11609                 rte_spinlock_unlock(&age_info->aged_sl);
11610                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11611         }
11612 }
11613
/**
 * Release an ASO age action.
 *
 * Drops one reference; when the last reference goes away the action is
 * detached from the aging machinery and returned to the manager free list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] age_idx
 *   Index of ASO age action to release.
 *
 * @return
 *   0 when age action was removed, otherwise the number of references.
 */
static int
flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
	struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
	/* NOTE(review): refcnt result is uint32_t but returned as int —
	 * assumes refcnt never exceeds INT_MAX; confirm. */
	uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);

	if (!ret) {
		flow_dv_aso_age_remove_from_age(dev, age);
		rte_spinlock_lock(&mng->free_sl);
		LIST_INSERT_HEAD(&mng->free, age, next);
		rte_spinlock_unlock(&mng->free_sl);
	}
	return ret;
}
11644
11645 /**
11646  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11647  *
11648  * @param[in] dev
11649  *   Pointer to the Ethernet device structure.
11650  *
11651  * @return
11652  *   0 on success, otherwise negative errno value and rte_errno is set.
11653  */
11654 static int
11655 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11656 {
11657         struct mlx5_priv *priv = dev->data->dev_private;
11658         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11659         void *old_pools = mng->pools;
11660         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11661         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11662         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11663
11664         if (!pools) {
11665                 rte_errno = ENOMEM;
11666                 return -ENOMEM;
11667         }
11668         if (old_pools) {
11669                 memcpy(pools, old_pools,
11670                        mng->n * sizeof(struct mlx5_flow_counter_pool *));
11671                 mlx5_free(old_pools);
11672         } else {
11673                 /* First ASO flow hit allocation - starting ASO data-path. */
11674                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11675
11676                 if (ret) {
11677                         mlx5_free(pools);
11678                         return ret;
11679                 }
11680         }
11681         mng->n = resize;
11682         mng->pools = pools;
11683         return 0;
11684 }
11685
11686 /**
11687  * Create and initialize a new ASO aging pool.
11688  *
11689  * @param[in] dev
11690  *   Pointer to the Ethernet device structure.
11691  * @param[out] age_free
11692  *   Where to put the pointer of a new age action.
11693  *
11694  * @return
11695  *   The age actions pool pointer and @p age_free is set on success,
11696  *   NULL otherwise and rte_errno is set.
11697  */
11698 static struct mlx5_aso_age_pool *
11699 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11700                         struct mlx5_aso_age_action **age_free)
11701 {
11702         struct mlx5_priv *priv = dev->data->dev_private;
11703         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11704         struct mlx5_aso_age_pool *pool = NULL;
11705         struct mlx5_devx_obj *obj = NULL;
11706         uint32_t i;
11707
11708         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11709                                                     priv->sh->pdn);
11710         if (!obj) {
11711                 rte_errno = ENODATA;
11712                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11713                 return NULL;
11714         }
11715         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11716         if (!pool) {
11717                 claim_zero(mlx5_devx_cmd_destroy(obj));
11718                 rte_errno = ENOMEM;
11719                 return NULL;
11720         }
11721         pool->flow_hit_aso_obj = obj;
11722         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11723         rte_spinlock_lock(&mng->resize_sl);
11724         pool->index = mng->next;
11725         /* Resize pools array if there is no room for the new pool in it. */
11726         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11727                 claim_zero(mlx5_devx_cmd_destroy(obj));
11728                 mlx5_free(pool);
11729                 rte_spinlock_unlock(&mng->resize_sl);
11730                 return NULL;
11731         }
11732         mng->pools[pool->index] = pool;
11733         mng->next++;
11734         rte_spinlock_unlock(&mng->resize_sl);
11735         /* Assign the first action in the new pool, the rest go to free list. */
11736         *age_free = &pool->actions[0];
11737         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11738                 pool->actions[i].offset = i;
11739                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11740         }
11741         return pool;
11742 }
11743
11744 /**
11745  * Allocate a ASO aging bit.
11746  *
11747  * @param[in] dev
11748  *   Pointer to the Ethernet device structure.
11749  * @param[out] error
11750  *   Pointer to the error structure.
11751  *
11752  * @return
11753  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11754  */
11755 static uint32_t
11756 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11757 {
11758         struct mlx5_priv *priv = dev->data->dev_private;
11759         const struct mlx5_aso_age_pool *pool;
11760         struct mlx5_aso_age_action *age_free = NULL;
11761         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11762
11763         MLX5_ASSERT(mng);
11764         /* Try to get the next free age action bit. */
11765         rte_spinlock_lock(&mng->free_sl);
11766         age_free = LIST_FIRST(&mng->free);
11767         if (age_free) {
11768                 LIST_REMOVE(age_free, next);
11769         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11770                 rte_spinlock_unlock(&mng->free_sl);
11771                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11772                                    NULL, "failed to create ASO age pool");
11773                 return 0; /* 0 is an error. */
11774         }
11775         rte_spinlock_unlock(&mng->free_sl);
11776         pool = container_of
11777           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11778                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11779                                                                        actions);
11780         if (!age_free->dr_action) {
11781                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11782                                                  error);
11783
11784                 if (reg_c < 0) {
11785                         rte_flow_error_set(error, rte_errno,
11786                                            RTE_FLOW_ERROR_TYPE_ACTION,
11787                                            NULL, "failed to get reg_c "
11788                                            "for ASO flow hit");
11789                         return 0; /* 0 is an error. */
11790                 }
11791 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11792                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11793                                 (priv->sh->rx_domain,
11794                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11795                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11796                                  (reg_c - REG_C_0));
11797 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11798                 if (!age_free->dr_action) {
11799                         rte_errno = errno;
11800                         rte_spinlock_lock(&mng->free_sl);
11801                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11802                         rte_spinlock_unlock(&mng->free_sl);
11803                         rte_flow_error_set(error, rte_errno,
11804                                            RTE_FLOW_ERROR_TYPE_ACTION,
11805                                            NULL, "failed to create ASO "
11806                                            "flow hit action");
11807                         return 0; /* 0 is an error. */
11808                 }
11809         }
11810         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11811         return pool->index | ((age_free->offset + 1) << 16);
11812 }
11813
11814 /**
11815  * Initialize flow ASO age parameters.
11816  *
11817  * @param[in] dev
11818  *   Pointer to rte_eth_dev structure.
11819  * @param[in] age_idx
11820  *   Index of ASO age action.
11821  * @param[in] context
11822  *   Pointer to flow counter age context.
11823  * @param[in] timeout
11824  *   Aging timeout in seconds.
11825  *
11826  */
11827 static void
11828 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11829                             uint32_t age_idx,
11830                             void *context,
11831                             uint32_t timeout)
11832 {
11833         struct mlx5_aso_age_action *aso_age;
11834
11835         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11836         MLX5_ASSERT(aso_age);
11837         aso_age->age_params.context = context;
11838         aso_age->age_params.timeout = timeout;
11839         aso_age->age_params.port_id = dev->data->port_id;
11840         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11841                          __ATOMIC_RELAXED);
11842         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11843                          __ATOMIC_RELAXED);
11844 }
11845
11846 static void
11847 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11848                                const struct rte_flow_item_integrity *value,
11849                                void *headers_m, void *headers_v)
11850 {
11851         if (mask->l4_ok) {
11852                 /* application l4_ok filter aggregates all hardware l4 filters
11853                  * therefore hw l4_checksum_ok must be implicitly added here.
11854                  */
11855                 struct rte_flow_item_integrity local_item;
11856
11857                 local_item.l4_csum_ok = 1;
11858                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11859                          local_item.l4_csum_ok);
11860                 if (value->l4_ok) {
11861                         /* application l4_ok = 1 matches sets both hw flags
11862                          * l4_ok and l4_checksum_ok flags to 1.
11863                          */
11864                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11865                                  l4_checksum_ok, local_item.l4_csum_ok);
11866                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11867                                  mask->l4_ok);
11868                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11869                                  value->l4_ok);
11870                 } else {
11871                         /* application l4_ok = 0 matches on hw flag
11872                          * l4_checksum_ok = 0 only.
11873                          */
11874                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11875                                  l4_checksum_ok, 0);
11876                 }
11877         } else if (mask->l4_csum_ok) {
11878                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11879                          mask->l4_csum_ok);
11880                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
11881                          value->l4_csum_ok);
11882         }
11883 }
11884
11885 static void
11886 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
11887                                const struct rte_flow_item_integrity *value,
11888                                void *headers_m, void *headers_v,
11889                                bool is_ipv4)
11890 {
11891         if (mask->l3_ok) {
11892                 /* application l3_ok filter aggregates all hardware l3 filters
11893                  * therefore hw ipv4_checksum_ok must be implicitly added here.
11894                  */
11895                 struct rte_flow_item_integrity local_item;
11896
11897                 local_item.ipv4_csum_ok = !!is_ipv4;
11898                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11899                          local_item.ipv4_csum_ok);
11900                 if (value->l3_ok) {
11901                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11902                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
11903                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
11904                                  mask->l3_ok);
11905                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
11906                                  value->l3_ok);
11907                 } else {
11908                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11909                                  ipv4_checksum_ok, 0);
11910                 }
11911         } else if (mask->ipv4_csum_ok) {
11912                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11913                          mask->ipv4_csum_ok);
11914                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11915                          value->ipv4_csum_ok);
11916         }
11917 }
11918
11919 static void
11920 flow_dv_translate_item_integrity(void *matcher, void *key,
11921                                  const struct rte_flow_item *head_item,
11922                                  const struct rte_flow_item *integrity_item)
11923 {
11924         const struct rte_flow_item_integrity *mask = integrity_item->mask;
11925         const struct rte_flow_item_integrity *value = integrity_item->spec;
11926         const struct rte_flow_item *tunnel_item, *end_item, *item;
11927         void *headers_m;
11928         void *headers_v;
11929         uint32_t l3_protocol;
11930
11931         if (!value)
11932                 return;
11933         if (!mask)
11934                 mask = &rte_flow_item_integrity_mask;
11935         if (value->level > 1) {
11936                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11937                                          inner_headers);
11938                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
11939         } else {
11940                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11941                                          outer_headers);
11942                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
11943         }
11944         tunnel_item = mlx5_flow_find_tunnel_item(head_item);
11945         if (value->level > 1) {
11946                 /* tunnel item was verified during the item validation */
11947                 item = tunnel_item;
11948                 end_item = mlx5_find_end_item(tunnel_item);
11949         } else {
11950                 item = head_item;
11951                 end_item = tunnel_item ? tunnel_item :
11952                            mlx5_find_end_item(integrity_item);
11953         }
11954         l3_protocol = mask->l3_ok ?
11955                       mlx5_flow_locate_proto_l3(&item, end_item) : 0;
11956         flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
11957                                        l3_protocol == RTE_ETHER_TYPE_IPV4);
11958         flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
11959 }
11960
11961 /**
11962  * Prepares DV flow counter with aging configuration.
11963  * Gets it by index when exists, creates a new one when doesn't.
11964  *
11965  * @param[in] dev
11966  *   Pointer to rte_eth_dev structure.
11967  * @param[in] dev_flow
11968  *   Pointer to the mlx5_flow.
11969  * @param[in, out] flow
11970  *   Pointer to the sub flow.
11971  * @param[in] count
11972  *   Pointer to the counter action configuration.
11973  * @param[in] age
11974  *   Pointer to the aging action configuration.
11975  * @param[out] error
11976  *   Pointer to the error structure.
11977  *
11978  * @return
11979  *   Pointer to the counter, NULL otherwise.
11980  */
11981 static struct mlx5_flow_counter *
11982 flow_dv_prepare_counter(struct rte_eth_dev *dev,
11983                         struct mlx5_flow *dev_flow,
11984                         struct rte_flow *flow,
11985                         const struct rte_flow_action_count *count,
11986                         const struct rte_flow_action_age *age,
11987                         struct rte_flow_error *error)
11988 {
11989         if (!flow->counter) {
11990                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
11991                                                                  count, age);
11992                 if (!flow->counter) {
11993                         rte_flow_error_set(error, rte_errno,
11994                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11995                                            "cannot create counter object.");
11996                         return NULL;
11997                 }
11998         }
11999         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12000 }
12001
/*
 * Release an ASO CT action by its own device.
 *
 * Drops one reference; on the last reference the DR actions are destroyed,
 * the CT object is marked free and returned to the manager free list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   Index of ASO CT action to release.
 *
 * @return
 *   0 when CT action was removed, otherwise the number of references,
 *   or -1 when the action is still busy in the ASO SQ.
 */
static inline int
flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	uint32_t ret;
	struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
	enum mlx5_aso_ct_state state =
			__atomic_load_n(&ct->state, __ATOMIC_RELAXED);

	/* Cannot release when CT is in the ASO SQ. */
	if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
		return -1;
	ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
	if (!ret) {
		if (ct->dr_action_orig) {
			/* Destroy needs DR CT support; pointer cleared either way. */
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			claim_zero(mlx5_glue->destroy_flow_action
					(ct->dr_action_orig));
#endif
			ct->dr_action_orig = NULL;
		}
		if (ct->dr_action_rply) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			claim_zero(mlx5_glue->destroy_flow_action
					(ct->dr_action_rply));
#endif
			ct->dr_action_rply = NULL;
		}
		/* Clear the state to free, no need in 1st allocation. */
		MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
		rte_spinlock_lock(&mng->ct_sl);
		LIST_INSERT_HEAD(&mng->free_cts, ct, next);
		rte_spinlock_unlock(&mng->ct_sl);
	}
	return (int)ret;
}
12050
12051 static inline int
12052 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
12053 {
12054         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12055         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12056         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12057         RTE_SET_USED(dev);
12058
12059         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12060         if (dev->data->dev_started != 1)
12061                 return -1;
12062         return flow_dv_aso_ct_dev_release(owndev, idx);
12063 }
12064
12065 /*
12066  * Resize the ASO CT pools array by 64 pools.
12067  *
12068  * @param[in] dev
12069  *   Pointer to the Ethernet device structure.
12070  *
12071  * @return
12072  *   0 on success, otherwise negative errno value and rte_errno is set.
12073  */
12074 static int
12075 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12076 {
12077         struct mlx5_priv *priv = dev->data->dev_private;
12078         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12079         void *old_pools = mng->pools;
12080         /* Magic number now, need a macro. */
12081         uint32_t resize = mng->n + 64;
12082         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12083         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12084
12085         if (!pools) {
12086                 rte_errno = ENOMEM;
12087                 return -rte_errno;
12088         }
12089         rte_rwlock_write_lock(&mng->resize_rwl);
12090         /* ASO SQ/QP was already initialized in the startup. */
12091         if (old_pools) {
12092                 /* Realloc could be an alternative choice. */
12093                 rte_memcpy(pools, old_pools,
12094                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12095                 mlx5_free(old_pools);
12096         }
12097         mng->n = resize;
12098         mng->pools = pools;
12099         rte_rwlock_write_unlock(&mng->resize_rwl);
12100         return 0;
12101 }
12102
/*
 * Create and initialize a new ASO CT pool.
 *
 * A DevX conn_track_offload object backing MLX5_ASO_CT_ACTIONS_PER_POOL
 * actions is created first; the pool is then registered in the manager's
 * pools array (resizing it when full). The first action of the new pool
 * is handed to the caller via @p ct_free, the remaining actions are put
 * on the manager's free list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] ct_free
 *   Where to put the pointer of a new CT action.
 *
 * @return
 *   The CT actions pool pointer and @p ct_free is set on success,
 *   NULL otherwise and rte_errno is set.
 */
static struct mlx5_aso_ct_pool *
flow_dv_ct_pool_create(struct rte_eth_dev *dev,
		       struct mlx5_aso_ct_action **ct_free)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	struct mlx5_aso_ct_pool *pool = NULL;
	struct mlx5_devx_obj *obj = NULL;
	uint32_t i;
	uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);

	obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
						priv->sh->pdn, log_obj_size);
	if (!obj) {
		rte_errno = ENODATA;
		DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
		return NULL;
	}
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		/* Unwind: the DevX object is useless without the pool. */
		claim_zero(mlx5_devx_cmd_destroy(obj));
		return NULL;
	}
	pool->devx_obj = obj;
	pool->index = mng->next;
	/* Resize pools array if there is no room for the new pool in it. */
	if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
		/* rte_errno was already set by the resize function. */
		claim_zero(mlx5_devx_cmd_destroy(obj));
		mlx5_free(pool);
		return NULL;
	}
	mng->pools[pool->index] = pool;
	mng->next++;
	/* Assign the first action in the new pool, the rest go to free list. */
	*ct_free = &pool->actions[0];
	/* Lock outside, the list operation is safe here. */
	for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
		/* refcnt is 0 when allocating the memory. */
		pool->actions[i].offset = i;
		LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
	}
	return pool;
}
12159
/*
 * Allocate an ASO CT action from the free list.
 *
 * When the free list is empty a new pool is created and its first action
 * is used. The DR flow actions for both connection directions are created
 * lazily on the first allocation of a given CT action and kept for reuse.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	struct mlx5_aso_ct_action *ct = NULL;
	struct mlx5_aso_ct_pool *pool;
	uint8_t reg_c;
	uint32_t ct_idx;

	MLX5_ASSERT(mng);
	if (!priv->config.devx) {
		/* CT ASO objects can only be created through DevX. */
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get a free CT action, if no, a new pool will be created. */
	rte_spinlock_lock(&mng->ct_sl);
	ct = LIST_FIRST(&mng->free_cts);
	if (ct) {
		LIST_REMOVE(ct, next);
	} else if (!flow_dv_ct_pool_create(dev, &ct)) {
		rte_spinlock_unlock(&mng->ct_sl);
		rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "failed to create ASO CT pool");
		return 0;
	}
	rte_spinlock_unlock(&mng->ct_sl);
	/* Recover the owning pool from the action's offset inside it. */
	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
	ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
	/* 0: inactive, 1: created, 2+: used by flows. */
	__atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
	/*
	 * NOTE(review): a failure of mlx5_flow_get_reg_id() is not checked
	 * before "reg_c - REG_C_0" is used below - confirm the CT register
	 * is always available once CT ASO is enabled.
	 */
	reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
	if (!ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
		ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
			(priv->sh->rx_domain, pool->devx_obj->obj,
			 ct->offset,
			 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
			 reg_c - REG_C_0);
#else
		RTE_SET_USED(reg_c);
#endif
		if (!ct->dr_action_orig) {
			/* Drop the reference taken above on failure. */
			flow_dv_aso_ct_dev_release(dev, ct_idx);
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "failed to create ASO CT action");
			return 0;
		}
	}
	if (!ct->dr_action_rply) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
		ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
			(priv->sh->rx_domain, pool->devx_obj->obj,
			 ct->offset,
			 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
			 reg_c - REG_C_0);
#endif
		if (!ct->dr_action_rply) {
			/* Drop the reference taken above on failure. */
			flow_dv_aso_ct_dev_release(dev, ct_idx);
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "failed to create ASO CT action");
			return 0;
		}
	}
	return ct_idx;
}
12239
12240 /*
12241  * Create a conntrack object with context and actions by using ASO mechanism.
12242  *
12243  * @param[in] dev
12244  *   Pointer to rte_eth_dev structure.
12245  * @param[in] pro
12246  *   Pointer to conntrack information profile.
12247  * @param[out] error
12248  *   Pointer to the error structure.
12249  *
12250  * @return
12251  *   Index to conntrack object on success, 0 otherwise.
12252  */
12253 static uint32_t
12254 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12255                                    const struct rte_flow_action_conntrack *pro,
12256                                    struct rte_flow_error *error)
12257 {
12258         struct mlx5_priv *priv = dev->data->dev_private;
12259         struct mlx5_dev_ctx_shared *sh = priv->sh;
12260         struct mlx5_aso_ct_action *ct;
12261         uint32_t idx;
12262
12263         if (!sh->ct_aso_en)
12264                 return rte_flow_error_set(error, ENOTSUP,
12265                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12266                                           "Connection is not supported");
12267         idx = flow_dv_aso_ct_alloc(dev, error);
12268         if (!idx)
12269                 return rte_flow_error_set(error, rte_errno,
12270                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12271                                           "Failed to allocate CT object");
12272         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12273         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12274                 return rte_flow_error_set(error, EBUSY,
12275                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12276                                           "Failed to update CT");
12277         ct->is_original = !!pro->is_original_dir;
12278         ct->peer = pro->peer_port;
12279         return idx;
12280 }
12281
12282 /**
12283  * Fill the flow with DV spec, lock free
12284  * (mutex should be acquired by caller).
12285  *
12286  * @param[in] dev
12287  *   Pointer to rte_eth_dev structure.
12288  * @param[in, out] dev_flow
12289  *   Pointer to the sub flow.
12290  * @param[in] attr
12291  *   Pointer to the flow attributes.
12292  * @param[in] items
12293  *   Pointer to the list of items.
12294  * @param[in] actions
12295  *   Pointer to the list of actions.
12296  * @param[out] error
12297  *   Pointer to the error structure.
12298  *
12299  * @return
12300  *   0 on success, a negative errno value otherwise and rte_errno is set.
12301  */
12302 static int
12303 flow_dv_translate(struct rte_eth_dev *dev,
12304                   struct mlx5_flow *dev_flow,
12305                   const struct rte_flow_attr *attr,
12306                   const struct rte_flow_item items[],
12307                   const struct rte_flow_action actions[],
12308                   struct rte_flow_error *error)
12309 {
12310         struct mlx5_priv *priv = dev->data->dev_private;
12311         struct mlx5_dev_config *dev_conf = &priv->config;
12312         struct rte_flow *flow = dev_flow->flow;
12313         struct mlx5_flow_handle *handle = dev_flow->handle;
12314         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12315         struct mlx5_flow_rss_desc *rss_desc;
12316         uint64_t item_flags = 0;
12317         uint64_t last_item = 0;
12318         uint64_t action_flags = 0;
12319         struct mlx5_flow_dv_matcher matcher = {
12320                 .mask = {
12321                         .size = sizeof(matcher.mask.buf),
12322                 },
12323         };
12324         int actions_n = 0;
12325         bool actions_end = false;
12326         union {
12327                 struct mlx5_flow_dv_modify_hdr_resource res;
12328                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12329                             sizeof(struct mlx5_modification_cmd) *
12330                             (MLX5_MAX_MODIFY_NUM + 1)];
12331         } mhdr_dummy;
12332         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12333         const struct rte_flow_action_count *count = NULL;
12334         const struct rte_flow_action_age *non_shared_age = NULL;
12335         union flow_dv_attr flow_attr = { .attr = 0 };
12336         uint32_t tag_be;
12337         union mlx5_flow_tbl_key tbl_key;
12338         uint32_t modify_action_position = UINT32_MAX;
12339         void *match_mask = matcher.mask.buf;
12340         void *match_value = dev_flow->dv.value.buf;
12341         uint8_t next_protocol = 0xff;
12342         struct rte_vlan_hdr vlan = { 0 };
12343         struct mlx5_flow_dv_dest_array_resource mdest_res;
12344         struct mlx5_flow_dv_sample_resource sample_res;
12345         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12346         const struct rte_flow_action_sample *sample = NULL;
12347         struct mlx5_flow_sub_actions_list *sample_act;
12348         uint32_t sample_act_pos = UINT32_MAX;
12349         uint32_t age_act_pos = UINT32_MAX;
12350         uint32_t num_of_dest = 0;
12351         int tmp_actions_n = 0;
12352         uint32_t table;
12353         int ret = 0;
12354         const struct mlx5_flow_tunnel *tunnel = NULL;
12355         struct flow_grp_info grp_info = {
12356                 .external = !!dev_flow->external,
12357                 .transfer = !!attr->transfer,
12358                 .fdb_def_rule = !!priv->fdb_def_rule,
12359                 .skip_scale = dev_flow->skip_scale &
12360                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12361                 .std_tbl_fix = true,
12362         };
12363         const struct rte_flow_item *head_item = items;
12364
12365         if (!wks)
12366                 return rte_flow_error_set(error, ENOMEM,
12367                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12368                                           NULL,
12369                                           "failed to push flow workspace");
12370         rss_desc = &wks->rss_desc;
12371         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12372         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12373         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12374                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12375         /* update normal path action resource into last index of array */
12376         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12377         if (is_tunnel_offload_active(dev)) {
12378                 if (dev_flow->tunnel) {
12379                         RTE_VERIFY(dev_flow->tof_type ==
12380                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12381                         tunnel = dev_flow->tunnel;
12382                 } else {
12383                         tunnel = mlx5_get_tof(items, actions,
12384                                               &dev_flow->tof_type);
12385                         dev_flow->tunnel = tunnel;
12386                 }
12387                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12388                                         (dev, attr, tunnel, dev_flow->tof_type);
12389         }
12390         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12391                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12392         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12393                                        &grp_info, error);
12394         if (ret)
12395                 return ret;
12396         dev_flow->dv.group = table;
12397         if (attr->transfer)
12398                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12399         /* number of actions must be set to 0 in case of dirty stack. */
12400         mhdr_res->actions_num = 0;
12401         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12402                 /*
12403                  * do not add decap action if match rule drops packet
12404                  * HW rejects rules with decap & drop
12405                  *
12406                  * if tunnel match rule was inserted before matching tunnel set
12407                  * rule flow table used in the match rule must be registered.
12408                  * current implementation handles that in the
12409                  * flow_dv_match_register() at the function end.
12410                  */
12411                 bool add_decap = true;
12412                 const struct rte_flow_action *ptr = actions;
12413
12414                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12415                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12416                                 add_decap = false;
12417                                 break;
12418                         }
12419                 }
12420                 if (add_decap) {
12421                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12422                                                            attr->transfer,
12423                                                            error))
12424                                 return -rte_errno;
12425                         dev_flow->dv.actions[actions_n++] =
12426                                         dev_flow->dv.encap_decap->action;
12427                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12428                 }
12429         }
12430         for (; !actions_end ; actions++) {
12431                 const struct rte_flow_action_queue *queue;
12432                 const struct rte_flow_action_rss *rss;
12433                 const struct rte_flow_action *action = actions;
12434                 const uint8_t *rss_key;
12435                 struct mlx5_flow_tbl_resource *tbl;
12436                 struct mlx5_aso_age_action *age_act;
12437                 struct mlx5_flow_counter *cnt_act;
12438                 uint32_t port_id = 0;
12439                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12440                 int action_type = actions->type;
12441                 const struct rte_flow_action *found_action = NULL;
12442                 uint32_t jump_group = 0;
12443                 uint32_t owner_idx;
12444                 struct mlx5_aso_ct_action *ct;
12445
12446                 if (!mlx5_flow_os_action_supported(action_type))
12447                         return rte_flow_error_set(error, ENOTSUP,
12448                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12449                                                   actions,
12450                                                   "action not supported");
12451                 switch (action_type) {
12452                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12453                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12454                         break;
12455                 case RTE_FLOW_ACTION_TYPE_VOID:
12456                         break;
12457                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12458                         if (flow_dv_translate_action_port_id(dev, action,
12459                                                              &port_id, error))
12460                                 return -rte_errno;
12461                         port_id_resource.port_id = port_id;
12462                         MLX5_ASSERT(!handle->rix_port_id_action);
12463                         if (flow_dv_port_id_action_resource_register
12464                             (dev, &port_id_resource, dev_flow, error))
12465                                 return -rte_errno;
12466                         dev_flow->dv.actions[actions_n++] =
12467                                         dev_flow->dv.port_id_action->action;
12468                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12469                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12470                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12471                         num_of_dest++;
12472                         break;
12473                 case RTE_FLOW_ACTION_TYPE_FLAG:
12474                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12475                         dev_flow->handle->mark = 1;
12476                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12477                                 struct rte_flow_action_mark mark = {
12478                                         .id = MLX5_FLOW_MARK_DEFAULT,
12479                                 };
12480
12481                                 if (flow_dv_convert_action_mark(dev, &mark,
12482                                                                 mhdr_res,
12483                                                                 error))
12484                                         return -rte_errno;
12485                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12486                                 break;
12487                         }
12488                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12489                         /*
12490                          * Only one FLAG or MARK is supported per device flow
12491                          * right now. So the pointer to the tag resource must be
12492                          * zero before the register process.
12493                          */
12494                         MLX5_ASSERT(!handle->dvh.rix_tag);
12495                         if (flow_dv_tag_resource_register(dev, tag_be,
12496                                                           dev_flow, error))
12497                                 return -rte_errno;
12498                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12499                         dev_flow->dv.actions[actions_n++] =
12500                                         dev_flow->dv.tag_resource->action;
12501                         break;
12502                 case RTE_FLOW_ACTION_TYPE_MARK:
12503                         action_flags |= MLX5_FLOW_ACTION_MARK;
12504                         dev_flow->handle->mark = 1;
12505                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12506                                 const struct rte_flow_action_mark *mark =
12507                                         (const struct rte_flow_action_mark *)
12508                                                 actions->conf;
12509
12510                                 if (flow_dv_convert_action_mark(dev, mark,
12511                                                                 mhdr_res,
12512                                                                 error))
12513                                         return -rte_errno;
12514                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12515                                 break;
12516                         }
12517                         /* Fall-through */
12518                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12519                         /* Legacy (non-extensive) MARK action. */
12520                         tag_be = mlx5_flow_mark_set
12521                               (((const struct rte_flow_action_mark *)
12522                                (actions->conf))->id);
12523                         MLX5_ASSERT(!handle->dvh.rix_tag);
12524                         if (flow_dv_tag_resource_register(dev, tag_be,
12525                                                           dev_flow, error))
12526                                 return -rte_errno;
12527                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12528                         dev_flow->dv.actions[actions_n++] =
12529                                         dev_flow->dv.tag_resource->action;
12530                         break;
12531                 case RTE_FLOW_ACTION_TYPE_SET_META:
12532                         if (flow_dv_convert_action_set_meta
12533                                 (dev, mhdr_res, attr,
12534                                  (const struct rte_flow_action_set_meta *)
12535                                   actions->conf, error))
12536                                 return -rte_errno;
12537                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12538                         break;
12539                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12540                         if (flow_dv_convert_action_set_tag
12541                                 (dev, mhdr_res,
12542                                  (const struct rte_flow_action_set_tag *)
12543                                   actions->conf, error))
12544                                 return -rte_errno;
12545                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12546                         break;
12547                 case RTE_FLOW_ACTION_TYPE_DROP:
12548                         action_flags |= MLX5_FLOW_ACTION_DROP;
12549                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12550                         break;
12551                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12552                         queue = actions->conf;
12553                         rss_desc->queue_num = 1;
12554                         rss_desc->queue[0] = queue->index;
12555                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12556                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12557                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12558                         num_of_dest++;
12559                         break;
12560                 case RTE_FLOW_ACTION_TYPE_RSS:
12561                         rss = actions->conf;
12562                         memcpy(rss_desc->queue, rss->queue,
12563                                rss->queue_num * sizeof(uint16_t));
12564                         rss_desc->queue_num = rss->queue_num;
12565                         /* NULL RSS key indicates default RSS key. */
12566                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12567                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12568                         /*
12569                          * rss->level and rss.types should be set in advance
12570                          * when expanding items for RSS.
12571                          */
12572                         action_flags |= MLX5_FLOW_ACTION_RSS;
12573                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12574                                 MLX5_FLOW_FATE_SHARED_RSS :
12575                                 MLX5_FLOW_FATE_QUEUE;
12576                         break;
12577                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12578                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12579                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12580                         __atomic_fetch_add(&age_act->refcnt, 1,
12581                                            __ATOMIC_RELAXED);
12582                         age_act_pos = actions_n++;
12583                         action_flags |= MLX5_FLOW_ACTION_AGE;
12584                         break;
12585                 case RTE_FLOW_ACTION_TYPE_AGE:
12586                         non_shared_age = action->conf;
12587                         age_act_pos = actions_n++;
12588                         action_flags |= MLX5_FLOW_ACTION_AGE;
12589                         break;
12590                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12591                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12592                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12593                                                              NULL);
12594                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12595                                            __ATOMIC_RELAXED);
12596                         /* Save information first, will apply later. */
12597                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12598                         break;
12599                 case RTE_FLOW_ACTION_TYPE_COUNT:
12600                         if (!dev_conf->devx) {
12601                                 return rte_flow_error_set
12602                                               (error, ENOTSUP,
12603                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12604                                                NULL,
12605                                                "count action not supported");
12606                         }
12607                         /* Save information first, will apply later. */
12608                         count = action->conf;
12609                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12610                         break;
12611                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12612                         dev_flow->dv.actions[actions_n++] =
12613                                                 priv->sh->pop_vlan_action;
12614                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12615                         break;
12616                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12617                         if (!(action_flags &
12618                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12619                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12620                         vlan.eth_proto = rte_be_to_cpu_16
12621                              ((((const struct rte_flow_action_of_push_vlan *)
12622                                                    actions->conf)->ethertype));
12623                         found_action = mlx5_flow_find_action
12624                                         (actions + 1,
12625                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12626                         if (found_action)
12627                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12628                         found_action = mlx5_flow_find_action
12629                                         (actions + 1,
12630                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12631                         if (found_action)
12632                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12633                         if (flow_dv_create_action_push_vlan
12634                                             (dev, attr, &vlan, dev_flow, error))
12635                                 return -rte_errno;
12636                         dev_flow->dv.actions[actions_n++] =
12637                                         dev_flow->dv.push_vlan_res->action;
12638                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12639                         break;
12640                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12641                         /* of_vlan_push action handled this action */
12642                         MLX5_ASSERT(action_flags &
12643                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12644                         break;
12645                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12646                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12647                                 break;
12648                         flow_dev_get_vlan_info_from_items(items, &vlan);
12649                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12650                         /* If no VLAN push - this is a modify header action */
12651                         if (flow_dv_convert_action_modify_vlan_vid
12652                                                 (mhdr_res, actions, error))
12653                                 return -rte_errno;
12654                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12655                         break;
12656                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12657                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12658                         if (flow_dv_create_action_l2_encap(dev, actions,
12659                                                            dev_flow,
12660                                                            attr->transfer,
12661                                                            error))
12662                                 return -rte_errno;
12663                         dev_flow->dv.actions[actions_n++] =
12664                                         dev_flow->dv.encap_decap->action;
12665                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12666                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12667                                 sample_act->action_flags |=
12668                                                         MLX5_FLOW_ACTION_ENCAP;
12669                         break;
12670                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12671                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12672                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12673                                                            attr->transfer,
12674                                                            error))
12675                                 return -rte_errno;
12676                         dev_flow->dv.actions[actions_n++] =
12677                                         dev_flow->dv.encap_decap->action;
12678                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12679                         break;
12680                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12681                         /* Handle encap with preceding decap. */
12682                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12683                                 if (flow_dv_create_action_raw_encap
12684                                         (dev, actions, dev_flow, attr, error))
12685                                         return -rte_errno;
12686                                 dev_flow->dv.actions[actions_n++] =
12687                                         dev_flow->dv.encap_decap->action;
12688                         } else {
12689                                 /* Handle encap without preceding decap. */
12690                                 if (flow_dv_create_action_l2_encap
12691                                     (dev, actions, dev_flow, attr->transfer,
12692                                      error))
12693                                         return -rte_errno;
12694                                 dev_flow->dv.actions[actions_n++] =
12695                                         dev_flow->dv.encap_decap->action;
12696                         }
12697                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12698                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12699                                 sample_act->action_flags |=
12700                                                         MLX5_FLOW_ACTION_ENCAP;
12701                         break;
12702                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12703                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12704                                 ;
12705                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12706                                 if (flow_dv_create_action_l2_decap
12707                                     (dev, dev_flow, attr->transfer, error))
12708                                         return -rte_errno;
12709                                 dev_flow->dv.actions[actions_n++] =
12710                                         dev_flow->dv.encap_decap->action;
12711                         }
12712                         /* If decap is followed by encap, handle it at encap. */
12713                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12714                         break;
12715                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12716                         dev_flow->dv.actions[actions_n++] =
12717                                 (void *)(uintptr_t)action->conf;
12718                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12719                         break;
12720                 case RTE_FLOW_ACTION_TYPE_JUMP:
12721                         jump_group = ((const struct rte_flow_action_jump *)
12722                                                         action->conf)->group;
12723                         grp_info.std_tbl_fix = 0;
12724                         if (dev_flow->skip_scale &
12725                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12726                                 grp_info.skip_scale = 1;
12727                         else
12728                                 grp_info.skip_scale = 0;
12729                         ret = mlx5_flow_group_to_table(dev, tunnel,
12730                                                        jump_group,
12731                                                        &table,
12732                                                        &grp_info, error);
12733                         if (ret)
12734                                 return ret;
12735                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12736                                                        attr->transfer,
12737                                                        !!dev_flow->external,
12738                                                        tunnel, jump_group, 0,
12739                                                        0, error);
12740                         if (!tbl)
12741                                 return rte_flow_error_set
12742                                                 (error, errno,
12743                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12744                                                  NULL,
12745                                                  "cannot create jump action.");
12746                         if (flow_dv_jump_tbl_resource_register
12747                             (dev, tbl, dev_flow, error)) {
12748                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12749                                 return rte_flow_error_set
12750                                                 (error, errno,
12751                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12752                                                  NULL,
12753                                                  "cannot create jump action.");
12754                         }
12755                         dev_flow->dv.actions[actions_n++] =
12756                                         dev_flow->dv.jump->action;
12757                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12758                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12759                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12760                         num_of_dest++;
12761                         break;
12762                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12763                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12764                         if (flow_dv_convert_action_modify_mac
12765                                         (mhdr_res, actions, error))
12766                                 return -rte_errno;
12767                         action_flags |= actions->type ==
12768                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12769                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12770                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12771                         break;
12772                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12773                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12774                         if (flow_dv_convert_action_modify_ipv4
12775                                         (mhdr_res, actions, error))
12776                                 return -rte_errno;
12777                         action_flags |= actions->type ==
12778                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12779                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12780                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12781                         break;
12782                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12783                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12784                         if (flow_dv_convert_action_modify_ipv6
12785                                         (mhdr_res, actions, error))
12786                                 return -rte_errno;
12787                         action_flags |= actions->type ==
12788                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12789                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12790                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12791                         break;
12792                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12793                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12794                         if (flow_dv_convert_action_modify_tp
12795                                         (mhdr_res, actions, items,
12796                                          &flow_attr, dev_flow, !!(action_flags &
12797                                          MLX5_FLOW_ACTION_DECAP), error))
12798                                 return -rte_errno;
12799                         action_flags |= actions->type ==
12800                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12801                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12802                                         MLX5_FLOW_ACTION_SET_TP_DST;
12803                         break;
12804                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12805                         if (flow_dv_convert_action_modify_dec_ttl
12806                                         (mhdr_res, items, &flow_attr, dev_flow,
12807                                          !!(action_flags &
12808                                          MLX5_FLOW_ACTION_DECAP), error))
12809                                 return -rte_errno;
12810                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12811                         break;
12812                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12813                         if (flow_dv_convert_action_modify_ttl
12814                                         (mhdr_res, actions, items, &flow_attr,
12815                                          dev_flow, !!(action_flags &
12816                                          MLX5_FLOW_ACTION_DECAP), error))
12817                                 return -rte_errno;
12818                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12819                         break;
12820                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12821                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12822                         if (flow_dv_convert_action_modify_tcp_seq
12823                                         (mhdr_res, actions, error))
12824                                 return -rte_errno;
12825                         action_flags |= actions->type ==
12826                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12827                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12828                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12829                         break;
12830
12831                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12832                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12833                         if (flow_dv_convert_action_modify_tcp_ack
12834                                         (mhdr_res, actions, error))
12835                                 return -rte_errno;
12836                         action_flags |= actions->type ==
12837                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
12838                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
12839                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
12840                         break;
12841                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
12842                         if (flow_dv_convert_action_set_reg
12843                                         (mhdr_res, actions, error))
12844                                 return -rte_errno;
12845                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12846                         break;
12847                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
12848                         if (flow_dv_convert_action_copy_mreg
12849                                         (dev, mhdr_res, actions, error))
12850                                 return -rte_errno;
12851                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12852                         break;
12853                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
12854                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
12855                         dev_flow->handle->fate_action =
12856                                         MLX5_FLOW_FATE_DEFAULT_MISS;
12857                         break;
12858                 case RTE_FLOW_ACTION_TYPE_METER:
12859                         if (!wks->fm)
12860                                 return rte_flow_error_set(error, rte_errno,
12861                                         RTE_FLOW_ERROR_TYPE_ACTION,
12862                                         NULL, "Failed to get meter in flow.");
12863                         /* Set the meter action. */
12864                         dev_flow->dv.actions[actions_n++] =
12865                                 wks->fm->meter_action;
12866                         action_flags |= MLX5_FLOW_ACTION_METER;
12867                         break;
12868                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
12869                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
12870                                                               actions, error))
12871                                 return -rte_errno;
12872                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
12873                         break;
12874                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
12875                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
12876                                                               actions, error))
12877                                 return -rte_errno;
12878                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
12879                         break;
12880                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
12881                         sample_act_pos = actions_n;
12882                         sample = (const struct rte_flow_action_sample *)
12883                                  action->conf;
12884                         actions_n++;
12885                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
12886                         /* put encap action into group if work with port id */
12887                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
12888                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
12889                                 sample_act->action_flags |=
12890                                                         MLX5_FLOW_ACTION_ENCAP;
12891                         break;
12892                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
12893                         if (flow_dv_convert_action_modify_field
12894                                         (dev, mhdr_res, actions, attr, error))
12895                                 return -rte_errno;
12896                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
12897                         break;
12898                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12899                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12900                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
12901                         if (!ct)
12902                                 return rte_flow_error_set(error, EINVAL,
12903                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12904                                                 NULL,
12905                                                 "Failed to get CT object.");
12906                         if (mlx5_aso_ct_available(priv->sh, ct))
12907                                 return rte_flow_error_set(error, rte_errno,
12908                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12909                                                 NULL,
12910                                                 "CT is unavailable.");
12911                         if (ct->is_original)
12912                                 dev_flow->dv.actions[actions_n] =
12913                                                         ct->dr_action_orig;
12914                         else
12915                                 dev_flow->dv.actions[actions_n] =
12916                                                         ct->dr_action_rply;
12917                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
12918                         flow->ct = owner_idx;
12919                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
12920                         actions_n++;
12921                         action_flags |= MLX5_FLOW_ACTION_CT;
12922                         break;
12923                 case RTE_FLOW_ACTION_TYPE_END:
12924                         actions_end = true;
12925                         if (mhdr_res->actions_num) {
12926                                 /* create modify action if needed. */
12927                                 if (flow_dv_modify_hdr_resource_register
12928                                         (dev, mhdr_res, dev_flow, error))
12929                                         return -rte_errno;
12930                                 dev_flow->dv.actions[modify_action_position] =
12931                                         handle->dvh.modify_hdr->action;
12932                         }
12933                         /*
12934                          * Handle AGE and COUNT action by single HW counter
12935                          * when they are not shared.
12936                          */
12937                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
12938                                 if ((non_shared_age &&
12939                                      count && !count->shared) ||
12940                                     !(priv->sh->flow_hit_aso_en &&
12941                                       (attr->group || attr->transfer))) {
12942                                         /* Creates age by counters. */
12943                                         cnt_act = flow_dv_prepare_counter
12944                                                                 (dev, dev_flow,
12945                                                                  flow, count,
12946                                                                  non_shared_age,
12947                                                                  error);
12948                                         if (!cnt_act)
12949                                                 return -rte_errno;
12950                                         dev_flow->dv.actions[age_act_pos] =
12951                                                                 cnt_act->action;
12952                                         break;
12953                                 }
12954                                 if (!flow->age && non_shared_age) {
12955                                         flow->age = flow_dv_aso_age_alloc
12956                                                                 (dev, error);
12957                                         if (!flow->age)
12958                                                 return -rte_errno;
12959                                         flow_dv_aso_age_params_init
12960                                                     (dev, flow->age,
12961                                                      non_shared_age->context ?
12962                                                      non_shared_age->context :
12963                                                      (void *)(uintptr_t)
12964                                                      (dev_flow->flow_idx),
12965                                                      non_shared_age->timeout);
12966                                 }
12967                                 age_act = flow_aso_age_get_by_idx(dev,
12968                                                                   flow->age);
12969                                 dev_flow->dv.actions[age_act_pos] =
12970                                                              age_act->dr_action;
12971                         }
12972                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
12973                                 /*
12974                                  * Create one count action, to be used
12975                                  * by all sub-flows.
12976                                  */
12977                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
12978                                                                   flow, count,
12979                                                                   NULL, error);
12980                                 if (!cnt_act)
12981                                         return -rte_errno;
12982                                 dev_flow->dv.actions[actions_n++] =
12983                                                                 cnt_act->action;
12984                         }
12985                 default:
12986                         break;
12987                 }
12988                 if (mhdr_res->actions_num &&
12989                     modify_action_position == UINT32_MAX)
12990                         modify_action_position = actions_n++;
12991         }
12992         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
12993                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
12994                 int item_type = items->type;
12995
12996                 if (!mlx5_flow_os_item_supported(item_type))
12997                         return rte_flow_error_set(error, ENOTSUP,
12998                                                   RTE_FLOW_ERROR_TYPE_ITEM,
12999                                                   NULL, "item not supported");
13000                 switch (item_type) {
13001                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13002                         flow_dv_translate_item_port_id
13003                                 (dev, match_mask, match_value, items, attr);
13004                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13005                         break;
13006                 case RTE_FLOW_ITEM_TYPE_ETH:
13007                         flow_dv_translate_item_eth(match_mask, match_value,
13008                                                    items, tunnel,
13009                                                    dev_flow->dv.group);
13010                         matcher.priority = action_flags &
13011                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13012                                         !dev_flow->external ?
13013                                         MLX5_PRIORITY_MAP_L3 :
13014                                         MLX5_PRIORITY_MAP_L2;
13015                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13016                                              MLX5_FLOW_LAYER_OUTER_L2;
13017                         break;
13018                 case RTE_FLOW_ITEM_TYPE_VLAN:
13019                         flow_dv_translate_item_vlan(dev_flow,
13020                                                     match_mask, match_value,
13021                                                     items, tunnel,
13022                                                     dev_flow->dv.group);
13023                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13024                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13025                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13026                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13027                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13028                         break;
13029                 case RTE_FLOW_ITEM_TYPE_IPV4:
13030                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13031                                                   &item_flags, &tunnel);
13032                         flow_dv_translate_item_ipv4(match_mask, match_value,
13033                                                     items, tunnel,
13034                                                     dev_flow->dv.group);
13035                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13036                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13037                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13038                         if (items->mask != NULL &&
13039                             ((const struct rte_flow_item_ipv4 *)
13040                              items->mask)->hdr.next_proto_id) {
13041                                 next_protocol =
13042                                         ((const struct rte_flow_item_ipv4 *)
13043                                          (items->spec))->hdr.next_proto_id;
13044                                 next_protocol &=
13045                                         ((const struct rte_flow_item_ipv4 *)
13046                                          (items->mask))->hdr.next_proto_id;
13047                         } else {
13048                                 /* Reset for inner layer. */
13049                                 next_protocol = 0xff;
13050                         }
13051                         break;
13052                 case RTE_FLOW_ITEM_TYPE_IPV6:
13053                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13054                                                   &item_flags, &tunnel);
13055                         flow_dv_translate_item_ipv6(match_mask, match_value,
13056                                                     items, tunnel,
13057                                                     dev_flow->dv.group);
13058                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13059                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13060                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13061                         if (items->mask != NULL &&
13062                             ((const struct rte_flow_item_ipv6 *)
13063                              items->mask)->hdr.proto) {
13064                                 next_protocol =
13065                                         ((const struct rte_flow_item_ipv6 *)
13066                                          items->spec)->hdr.proto;
13067                                 next_protocol &=
13068                                         ((const struct rte_flow_item_ipv6 *)
13069                                          items->mask)->hdr.proto;
13070                         } else {
13071                                 /* Reset for inner layer. */
13072                                 next_protocol = 0xff;
13073                         }
13074                         break;
13075                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13076                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13077                                                              match_value,
13078                                                              items, tunnel);
13079                         last_item = tunnel ?
13080                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13081                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13082                         if (items->mask != NULL &&
13083                             ((const struct rte_flow_item_ipv6_frag_ext *)
13084                              items->mask)->hdr.next_header) {
13085                                 next_protocol =
13086                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13087                                  items->spec)->hdr.next_header;
13088                                 next_protocol &=
13089                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13090                                  items->mask)->hdr.next_header;
13091                         } else {
13092                                 /* Reset for inner layer. */
13093                                 next_protocol = 0xff;
13094                         }
13095                         break;
13096                 case RTE_FLOW_ITEM_TYPE_TCP:
13097                         flow_dv_translate_item_tcp(match_mask, match_value,
13098                                                    items, tunnel);
13099                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13100                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13101                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13102                         break;
13103                 case RTE_FLOW_ITEM_TYPE_UDP:
13104                         flow_dv_translate_item_udp(match_mask, match_value,
13105                                                    items, tunnel);
13106                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13107                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13108                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13109                         break;
13110                 case RTE_FLOW_ITEM_TYPE_GRE:
13111                         flow_dv_translate_item_gre(match_mask, match_value,
13112                                                    items, tunnel);
13113                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13114                         last_item = MLX5_FLOW_LAYER_GRE;
13115                         break;
13116                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13117                         flow_dv_translate_item_gre_key(match_mask,
13118                                                        match_value, items);
13119                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13120                         break;
13121                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13122                         flow_dv_translate_item_nvgre(match_mask, match_value,
13123                                                      items, tunnel);
13124                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13125                         last_item = MLX5_FLOW_LAYER_GRE;
13126                         break;
13127                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13128                         flow_dv_translate_item_vxlan(dev, attr,
13129                                                      match_mask, match_value,
13130                                                      items, tunnel);
13131                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13132                         last_item = MLX5_FLOW_LAYER_VXLAN;
13133                         break;
13134                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13135                         flow_dv_translate_item_vxlan_gpe(match_mask,
13136                                                          match_value, items,
13137                                                          tunnel);
13138                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13139                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13140                         break;
13141                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13142                         flow_dv_translate_item_geneve(match_mask, match_value,
13143                                                       items, tunnel);
13144                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13145                         last_item = MLX5_FLOW_LAYER_GENEVE;
13146                         break;
13147                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13148                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13149                                                           match_value,
13150                                                           items, error);
13151                         if (ret)
13152                                 return rte_flow_error_set(error, -ret,
13153                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13154                                         "cannot create GENEVE TLV option");
13155                         flow->geneve_tlv_option = 1;
13156                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13157                         break;
13158                 case RTE_FLOW_ITEM_TYPE_MPLS:
13159                         flow_dv_translate_item_mpls(match_mask, match_value,
13160                                                     items, last_item, tunnel);
13161                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13162                         last_item = MLX5_FLOW_LAYER_MPLS;
13163                         break;
13164                 case RTE_FLOW_ITEM_TYPE_MARK:
13165                         flow_dv_translate_item_mark(dev, match_mask,
13166                                                     match_value, items);
13167                         last_item = MLX5_FLOW_ITEM_MARK;
13168                         break;
13169                 case RTE_FLOW_ITEM_TYPE_META:
13170                         flow_dv_translate_item_meta(dev, match_mask,
13171                                                     match_value, attr, items);
13172                         last_item = MLX5_FLOW_ITEM_METADATA;
13173                         break;
13174                 case RTE_FLOW_ITEM_TYPE_ICMP:
13175                         flow_dv_translate_item_icmp(match_mask, match_value,
13176                                                     items, tunnel);
13177                         last_item = MLX5_FLOW_LAYER_ICMP;
13178                         break;
13179                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13180                         flow_dv_translate_item_icmp6(match_mask, match_value,
13181                                                       items, tunnel);
13182                         last_item = MLX5_FLOW_LAYER_ICMP6;
13183                         break;
13184                 case RTE_FLOW_ITEM_TYPE_TAG:
13185                         flow_dv_translate_item_tag(dev, match_mask,
13186                                                    match_value, items);
13187                         last_item = MLX5_FLOW_ITEM_TAG;
13188                         break;
13189                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13190                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13191                                                         match_value, items);
13192                         last_item = MLX5_FLOW_ITEM_TAG;
13193                         break;
13194                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13195                         flow_dv_translate_item_tx_queue(dev, match_mask,
13196                                                         match_value,
13197                                                         items);
13198                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13199                         break;
13200                 case RTE_FLOW_ITEM_TYPE_GTP:
13201                         flow_dv_translate_item_gtp(match_mask, match_value,
13202                                                    items, tunnel);
13203                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13204                         last_item = MLX5_FLOW_LAYER_GTP;
13205                         break;
13206                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13207                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13208                                                           match_value,
13209                                                           items);
13210                         if (ret)
13211                                 return rte_flow_error_set(error, -ret,
13212                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13213                                         "cannot create GTP PSC item");
13214                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13215                         break;
13216                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13217                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13218                                 /* Create it only the first time to be used. */
13219                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13220                                 if (ret)
13221                                         return rte_flow_error_set
13222                                                 (error, -ret,
13223                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13224                                                 NULL,
13225                                                 "cannot create eCPRI parser");
13226                         }
13227                         flow_dv_translate_item_ecpri(dev, match_mask,
13228                                                      match_value, items);
13229                         /* No other protocol should follow eCPRI layer. */
13230                         last_item = MLX5_FLOW_LAYER_ECPRI;
13231                         break;
13232                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13233                         flow_dv_translate_item_integrity(match_mask,
13234                                                          match_value,
13235                                                          head_item, items);
13236                         break;
13237                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13238                         flow_dv_translate_item_aso_ct(dev, match_mask,
13239                                                       match_value, items);
13240                         break;
13241                 default:
13242                         break;
13243                 }
13244                 item_flags |= last_item;
13245         }
13246         /*
13247          * When E-Switch mode is enabled, we have two cases where we need to
13248          * set the source port manually.
13249          * The first one, is in case of Nic steering rule, and the second is
13250          * E-Switch rule where no port_id item was found. In both cases
13251          * the source port is set according the current port in use.
13252          */
13253         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13254             (priv->representor || priv->master)) {
13255                 if (flow_dv_translate_item_port_id(dev, match_mask,
13256                                                    match_value, NULL, attr))
13257                         return -rte_errno;
13258         }
13259 #ifdef RTE_LIBRTE_MLX5_DEBUG
13260         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13261                                               dev_flow->dv.value.buf));
13262 #endif
13263         /*
13264          * Layers may be already initialized from prefix flow if this dev_flow
13265          * is the suffix flow.
13266          */
13267         handle->layers |= item_flags;
13268         if (action_flags & MLX5_FLOW_ACTION_RSS)
13269                 flow_dv_hashfields_set(dev_flow, rss_desc);
	/* If the sample action contains an RSS action, the Sample/Mirror
	 * resource must be registered only after the hash fields are updated.
	 */
13273         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13274                 ret = flow_dv_translate_action_sample(dev,
13275                                                       sample,
13276                                                       dev_flow, attr,
13277                                                       &num_of_dest,
13278                                                       sample_actions,
13279                                                       &sample_res,
13280                                                       error);
13281                 if (ret < 0)
13282                         return ret;
13283                 ret = flow_dv_create_action_sample(dev,
13284                                                    dev_flow,
13285                                                    num_of_dest,
13286                                                    &sample_res,
13287                                                    &mdest_res,
13288                                                    sample_actions,
13289                                                    action_flags,
13290                                                    error);
13291                 if (ret < 0)
13292                         return rte_flow_error_set
13293                                                 (error, rte_errno,
13294                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13295                                                 NULL,
13296                                                 "cannot create sample action");
13297                 if (num_of_dest > 1) {
13298                         dev_flow->dv.actions[sample_act_pos] =
13299                         dev_flow->dv.dest_array_res->action;
13300                 } else {
13301                         dev_flow->dv.actions[sample_act_pos] =
13302                         dev_flow->dv.sample_res->verbs_action;
13303                 }
13304         }
13305         /*
13306          * For multiple destination (sample action with ratio=1), the encap
13307          * action and port id action will be combined into group action.
13308          * So need remove the original these actions in the flow and only
13309          * use the sample action instead of.
13310          */
13311         if (num_of_dest > 1 &&
13312             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13313                 int i;
13314                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13315
13316                 for (i = 0; i < actions_n; i++) {
13317                         if ((sample_act->dr_encap_action &&
13318                                 sample_act->dr_encap_action ==
13319                                 dev_flow->dv.actions[i]) ||
13320                                 (sample_act->dr_port_id_action &&
13321                                 sample_act->dr_port_id_action ==
13322                                 dev_flow->dv.actions[i]) ||
13323                                 (sample_act->dr_jump_action &&
13324                                 sample_act->dr_jump_action ==
13325                                 dev_flow->dv.actions[i]))
13326                                 continue;
13327                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13328                 }
13329                 memcpy((void *)dev_flow->dv.actions,
13330                                 (void *)temp_actions,
13331                                 tmp_actions_n * sizeof(void *));
13332                 actions_n = tmp_actions_n;
13333         }
13334         dev_flow->dv.actions_n = actions_n;
13335         dev_flow->act_flags = action_flags;
13336         if (wks->skip_matcher_reg)
13337                 return 0;
13338         /* Register matcher. */
13339         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13340                                     matcher.mask.size);
13341         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13342                                         matcher.priority);
13343         /**
13344          * When creating meter drop flow in drop table, using original
13345          * 5-tuple match, the matcher priority should be lower than
13346          * mtr_id matcher.
13347          */
13348         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13349             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13350             matcher.priority <= MLX5_REG_BITS)
13351                 matcher.priority += MLX5_REG_BITS;
13352         /* reserved field no needs to be set to 0 here. */
13353         tbl_key.is_fdb = attr->transfer;
13354         tbl_key.is_egress = attr->egress;
13355         tbl_key.level = dev_flow->dv.group;
13356         tbl_key.id = dev_flow->dv.table_id;
13357         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13358                                      tunnel, attr->group, error))
13359                 return -rte_errno;
13360         return 0;
13361 }
13362
13363 /**
13364  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13365  * and tunnel.
13366  *
 * @param[in, out] action
 *   Shared RSS action holding hash RX queue objects.
 * @param[in] hash_fields
 *   Defines combination of packet fields to participate in RX hash.
 * @param[in] hrxq_idx
 *   Hash RX queue index to set.
13375  *
13376  * @return
13377  *   0 on success, otherwise negative errno value.
13378  */
13379 static int
13380 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13381                               const uint64_t hash_fields,
13382                               uint32_t hrxq_idx)
13383 {
13384         uint32_t *hrxqs = action->hrxq;
13385
13386         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13387         case MLX5_RSS_HASH_IPV4:
13388                 /* fall-through. */
13389         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13390                 /* fall-through. */
13391         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13392                 hrxqs[0] = hrxq_idx;
13393                 return 0;
13394         case MLX5_RSS_HASH_IPV4_TCP:
13395                 /* fall-through. */
13396         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13397                 /* fall-through. */
13398         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13399                 hrxqs[1] = hrxq_idx;
13400                 return 0;
13401         case MLX5_RSS_HASH_IPV4_UDP:
13402                 /* fall-through. */
13403         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13404                 /* fall-through. */
13405         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13406                 hrxqs[2] = hrxq_idx;
13407                 return 0;
13408         case MLX5_RSS_HASH_IPV6:
13409                 /* fall-through. */
13410         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13411                 /* fall-through. */
13412         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13413                 hrxqs[3] = hrxq_idx;
13414                 return 0;
13415         case MLX5_RSS_HASH_IPV6_TCP:
13416                 /* fall-through. */
13417         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13418                 /* fall-through. */
13419         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13420                 hrxqs[4] = hrxq_idx;
13421                 return 0;
13422         case MLX5_RSS_HASH_IPV6_UDP:
13423                 /* fall-through. */
13424         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13425                 /* fall-through. */
13426         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13427                 hrxqs[5] = hrxq_idx;
13428                 return 0;
13429         case MLX5_RSS_HASH_NONE:
13430                 hrxqs[6] = hrxq_idx;
13431                 return 0;
13432         default:
13433                 return -1;
13434         }
13435 }
13436
13437 /**
13438  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13439  * and tunnel.
13440  *
13441  * @param[in] dev
13442  *   Pointer to the Ethernet device structure.
13443  * @param[in] idx
13444  *   Shared RSS action ID holding hash RX queue objects.
 * @param[in] hash_fields
 *   Defines combination of packet fields to participate in RX hash.
13449  *
13450  * @return
13451  *   Valid hash RX queue index, otherwise 0.
13452  */
13453 static uint32_t
13454 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13455                                  const uint64_t hash_fields)
13456 {
13457         struct mlx5_priv *priv = dev->data->dev_private;
13458         struct mlx5_shared_action_rss *shared_rss =
13459             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13460         const uint32_t *hrxqs = shared_rss->hrxq;
13461
13462         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13463         case MLX5_RSS_HASH_IPV4:
13464                 /* fall-through. */
13465         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13466                 /* fall-through. */
13467         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13468                 return hrxqs[0];
13469         case MLX5_RSS_HASH_IPV4_TCP:
13470                 /* fall-through. */
13471         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13472                 /* fall-through. */
13473         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13474                 return hrxqs[1];
13475         case MLX5_RSS_HASH_IPV4_UDP:
13476                 /* fall-through. */
13477         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13478                 /* fall-through. */
13479         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13480                 return hrxqs[2];
13481         case MLX5_RSS_HASH_IPV6:
13482                 /* fall-through. */
13483         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13484                 /* fall-through. */
13485         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13486                 return hrxqs[3];
13487         case MLX5_RSS_HASH_IPV6_TCP:
13488                 /* fall-through. */
13489         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13490                 /* fall-through. */
13491         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13492                 return hrxqs[4];
13493         case MLX5_RSS_HASH_IPV6_UDP:
13494                 /* fall-through. */
13495         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13496                 /* fall-through. */
13497         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13498                 return hrxqs[5];
13499         case MLX5_RSS_HASH_NONE:
13500                 return hrxqs[6];
13501         default:
13502                 return 0;
13503         }
13504
13505 }
13506
13507 /**
13508  * Apply the flow to the NIC, lock free,
13509  * (mutex should be acquired by caller).
13510  *
13511  * @param[in] dev
13512  *   Pointer to the Ethernet device structure.
13513  * @param[in, out] flow
13514  *   Pointer to flow structure.
13515  * @param[out] error
13516  *   Pointer to error structure.
13517  *
13518  * @return
13519  *   0 on success, a negative errno value otherwise and rte_errno is set.
13520  */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv_workspace *dv;
	struct mlx5_flow_handle *dh;
	struct mlx5_flow_handle_dv *dv_h;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	int n;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
	uint8_t misc_mask;

	MLX5_ASSERT(wks);
	/* Walk the thread-workspace device flows in reverse order. */
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		dv = &dev_flow->dv;
		dh = dev_flow->handle;
		dv_h = &dh->dvh;
		n = dv->actions_n;
		/* Append the fate action matching the handle's fate type. */
		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
			if (dv->transfer) {
				MLX5_ASSERT(priv->sh->dr_drop_action);
				dv->actions[n++] = priv->sh->dr_drop_action;
			} else {
#ifdef HAVE_MLX5DV_DR
				/* DR supports drop action placeholder. */
				MLX5_ASSERT(priv->sh->dr_drop_action);
				dv->actions[n++] = priv->sh->dr_drop_action;
#else
				/* For DV we use the explicit drop queue. */
				MLX5_ASSERT(priv->drop_queue.hrxq);
				dv->actions[n++] =
						priv->drop_queue.hrxq->action;
#endif
			}
		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
			   !dv_h->rix_sample && !dv_h->rix_dest_array)) {
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;

			/* Obtain (create or reuse) a hash RX queue action. */
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
						    &hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dh->rix_hrxq = hrxq_idx;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
			struct mlx5_hrxq *hrxq = NULL;
			uint32_t hrxq_idx;

			/*
			 * Shared RSS: only look the hrxq up by hash fields;
			 * it is owned and created by the shared action.
			 */
			hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
						rss_desc->shared_rss,
						dev_flow->hash_fields);
			if (hrxq_idx)
				hrxq = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					 hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dh->rix_srss = rss_desc->shared_rss;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
			if (!priv->sh->default_miss_action) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "default miss action not be created.");
				goto error;
			}
			dv->actions[n++] = priv->sh->default_miss_action;
		}
		/* Trim the match value buffer to the enabled misc params. */
		misc_mask = flow_dv_matcher_enable(dv->value.buf);
		__flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
					       (void *)&dv->value, n,
					       dv->actions, &dh->drv_flow);
		if (err) {
			rte_flow_error_set
				(error, errno,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				(!priv->config.allow_duplicate_pattern &&
				errno == EEXIST) ?
				"duplicating pattern is not allowed" :
				"hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Roll back resources acquired by handles applied so far. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dh, next) {
		/* hrxq is union, don't clear it if the flag is not set. */
		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
			mlx5_hrxq_release(dev, dh->rix_hrxq);
			dh->rix_hrxq = 0;
		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
			dh->rix_srss = 0;
		}
		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
13652
13653 void
13654 flow_dv_matcher_remove_cb(struct mlx5_list *list __rte_unused,
13655                           struct mlx5_list_entry *entry)
13656 {
13657         struct mlx5_flow_dv_matcher *resource = container_of(entry,
13658                                                              typeof(*resource),
13659                                                              entry);
13660
13661         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
13662         mlx5_free(resource);
13663 }
13664
13665 /**
13666  * Release the flow matcher.
13667  *
13668  * @param dev
13669  *   Pointer to Ethernet device.
 * @param handle
 *   Pointer to the flow handle holding the matcher reference.
13672  *
13673  * @return
13674  *   1 while a reference on it exists, 0 when freed.
13675  */
13676 static int
13677 flow_dv_matcher_release(struct rte_eth_dev *dev,
13678                         struct mlx5_flow_handle *handle)
13679 {
13680         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13681         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13682                                                             typeof(*tbl), tbl);
13683         int ret;
13684
13685         MLX5_ASSERT(matcher->matcher_object);
13686         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
13687         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13688         return ret;
13689 }
13690
13691 /**
13692  * Release encap_decap resource.
13693  *
13694  * @param list
13695  *   Pointer to the hash list.
13696  * @param entry
13697  *   Pointer to exist resource entry object.
13698  */
13699 void
13700 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
13701                               struct mlx5_hlist_entry *entry)
13702 {
13703         struct mlx5_dev_ctx_shared *sh = list->ctx;
13704         struct mlx5_flow_dv_encap_decap_resource *res =
13705                                        container_of(entry, typeof(*res), entry);
13706
13707         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13708         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13709 }
13710
13711 /**
13712  * Release an encap/decap resource.
13713  *
13714  * @param dev
13715  *   Pointer to Ethernet device.
13716  * @param encap_decap_idx
13717  *   Index of encap decap resource.
13718  *
13719  * @return
13720  *   1 while a reference on it exists, 0 when freed.
13721  */
13722 static int
13723 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13724                                      uint32_t encap_decap_idx)
13725 {
13726         struct mlx5_priv *priv = dev->data->dev_private;
13727         struct mlx5_flow_dv_encap_decap_resource *resource;
13728
13729         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13730                                   encap_decap_idx);
13731         if (!resource)
13732                 return 0;
13733         MLX5_ASSERT(resource->action);
13734         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
13735 }
13736
13737 /**
 * Release a jump to table action resource.
13739  *
13740  * @param dev
13741  *   Pointer to Ethernet device.
13742  * @param rix_jump
13743  *   Index to the jump action resource.
13744  *
13745  * @return
13746  *   1 while a reference on it exists, 0 when freed.
13747  */
13748 static int
13749 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13750                                   uint32_t rix_jump)
13751 {
13752         struct mlx5_priv *priv = dev->data->dev_private;
13753         struct mlx5_flow_tbl_data_entry *tbl_data;
13754
13755         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13756                                   rix_jump);
13757         if (!tbl_data)
13758                 return 0;
13759         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13760 }
13761
13762 void
13763 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
13764                          struct mlx5_hlist_entry *entry)
13765 {
13766         struct mlx5_flow_dv_modify_hdr_resource *res =
13767                 container_of(entry, typeof(*res), entry);
13768
13769         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13770         mlx5_free(entry);
13771 }
13772
13773 /**
13774  * Release a modify-header resource.
13775  *
13776  * @param dev
13777  *   Pointer to Ethernet device.
13778  * @param handle
13779  *   Pointer to mlx5_flow_handle.
13780  *
13781  * @return
13782  *   1 while a reference on it exists, 0 when freed.
13783  */
13784 static int
13785 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13786                                     struct mlx5_flow_handle *handle)
13787 {
13788         struct mlx5_priv *priv = dev->data->dev_private;
13789         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13790
13791         MLX5_ASSERT(entry->action);
13792         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13793 }
13794
13795 void
13796 flow_dv_port_id_remove_cb(struct mlx5_list *list,
13797                           struct mlx5_list_entry *entry)
13798 {
13799         struct mlx5_dev_ctx_shared *sh = list->ctx;
13800         struct mlx5_flow_dv_port_id_action_resource *resource =
13801                                   container_of(entry, typeof(*resource), entry);
13802
13803         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13804         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
13805 }
13806
13807 /**
13808  * Release port ID action resource.
13809  *
13810  * @param dev
13811  *   Pointer to Ethernet device.
13812  * @param handle
13813  *   Pointer to mlx5_flow_handle.
13814  *
13815  * @return
13816  *   1 while a reference on it exists, 0 when freed.
13817  */
13818 static int
13819 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
13820                                         uint32_t port_id)
13821 {
13822         struct mlx5_priv *priv = dev->data->dev_private;
13823         struct mlx5_flow_dv_port_id_action_resource *resource;
13824
13825         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
13826         if (!resource)
13827                 return 0;
13828         MLX5_ASSERT(resource->action);
13829         return mlx5_list_unregister(priv->sh->port_id_action_list,
13830                                     &resource->entry);
13831 }
13832
13833 /**
13834  * Release shared RSS action resource.
13835  *
13836  * @param dev
13837  *   Pointer to Ethernet device.
13838  * @param srss
13839  *   Shared RSS action index.
13840  */
13841 static void
13842 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
13843 {
13844         struct mlx5_priv *priv = dev->data->dev_private;
13845         struct mlx5_shared_action_rss *shared_rss;
13846
13847         shared_rss = mlx5_ipool_get
13848                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
13849         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13850 }
13851
13852 void
13853 flow_dv_push_vlan_remove_cb(struct mlx5_list *list,
13854                             struct mlx5_list_entry *entry)
13855 {
13856         struct mlx5_dev_ctx_shared *sh = list->ctx;
13857         struct mlx5_flow_dv_push_vlan_action_resource *resource =
13858                         container_of(entry, typeof(*resource), entry);
13859
13860         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13861         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
13862 }
13863
13864 /**
13865  * Release push vlan action resource.
13866  *
13867  * @param dev
13868  *   Pointer to Ethernet device.
13869  * @param handle
13870  *   Pointer to mlx5_flow_handle.
13871  *
13872  * @return
13873  *   1 while a reference on it exists, 0 when freed.
13874  */
13875 static int
13876 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
13877                                           struct mlx5_flow_handle *handle)
13878 {
13879         struct mlx5_priv *priv = dev->data->dev_private;
13880         struct mlx5_flow_dv_push_vlan_action_resource *resource;
13881         uint32_t idx = handle->dvh.rix_push_vlan;
13882
13883         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
13884         if (!resource)
13885                 return 0;
13886         MLX5_ASSERT(resource->action);
13887         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
13888                                     &resource->entry);
13889 }
13890
13891 /**
13892  * Release the fate resource.
13893  *
13894  * @param dev
13895  *   Pointer to Ethernet device.
13896  * @param handle
13897  *   Pointer to mlx5_flow_handle.
13898  */
13899 static void
13900 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
13901                                struct mlx5_flow_handle *handle)
13902 {
13903         if (!handle->rix_fate)
13904                 return;
13905         switch (handle->fate_action) {
13906         case MLX5_FLOW_FATE_QUEUE:
13907                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
13908                         mlx5_hrxq_release(dev, handle->rix_hrxq);
13909                 break;
13910         case MLX5_FLOW_FATE_JUMP:
13911                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
13912                 break;
13913         case MLX5_FLOW_FATE_PORT_ID:
13914                 flow_dv_port_id_action_resource_release(dev,
13915                                 handle->rix_port_id_action);
13916                 break;
13917         default:
13918                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
13919                 break;
13920         }
13921         handle->rix_fate = 0;
13922 }
13923
13924 void
13925 flow_dv_sample_remove_cb(struct mlx5_list *list __rte_unused,
13926                          struct mlx5_list_entry *entry)
13927 {
13928         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
13929                                                               typeof(*resource),
13930                                                               entry);
13931         struct rte_eth_dev *dev = resource->dev;
13932         struct mlx5_priv *priv = dev->data->dev_private;
13933
13934         if (resource->verbs_action)
13935                 claim_zero(mlx5_flow_os_destroy_flow_action
13936                                                       (resource->verbs_action));
13937         if (resource->normal_path_tbl)
13938                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13939                                              resource->normal_path_tbl);
13940         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
13941         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
13942         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
13943 }
13944
13945 /**
13946  * Release an sample resource.
13947  *
13948  * @param dev
13949  *   Pointer to Ethernet device.
13950  * @param handle
13951  *   Pointer to mlx5_flow_handle.
13952  *
13953  * @return
13954  *   1 while a reference on it exists, 0 when freed.
13955  */
13956 static int
13957 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
13958                                      struct mlx5_flow_handle *handle)
13959 {
13960         struct mlx5_priv *priv = dev->data->dev_private;
13961         struct mlx5_flow_dv_sample_resource *resource;
13962
13963         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13964                                   handle->dvh.rix_sample);
13965         if (!resource)
13966                 return 0;
13967         MLX5_ASSERT(resource->verbs_action);
13968         return mlx5_list_unregister(priv->sh->sample_action_list,
13969                                     &resource->entry);
13970 }
13971
13972 void
13973 flow_dv_dest_array_remove_cb(struct mlx5_list *list __rte_unused,
13974                              struct mlx5_list_entry *entry)
13975 {
13976         struct mlx5_flow_dv_dest_array_resource *resource =
13977                         container_of(entry, typeof(*resource), entry);
13978         struct rte_eth_dev *dev = resource->dev;
13979         struct mlx5_priv *priv = dev->data->dev_private;
13980         uint32_t i = 0;
13981
13982         MLX5_ASSERT(resource->action);
13983         if (resource->action)
13984                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13985         for (; i < resource->num_of_dest; i++)
13986                 flow_dv_sample_sub_actions_release(dev,
13987                                                    &resource->sample_idx[i]);
13988         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
13989         DRV_LOG(DEBUG, "destination array resource %p: removed",
13990                 (void *)resource);
13991 }
13992
13993 /**
13994  * Release an destination array resource.
13995  *
13996  * @param dev
13997  *   Pointer to Ethernet device.
13998  * @param handle
13999  *   Pointer to mlx5_flow_handle.
14000  *
14001  * @return
14002  *   1 while a reference on it exists, 0 when freed.
14003  */
14004 static int
14005 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14006                                     struct mlx5_flow_handle *handle)
14007 {
14008         struct mlx5_priv *priv = dev->data->dev_private;
14009         struct mlx5_flow_dv_dest_array_resource *resource;
14010
14011         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14012                                   handle->dvh.rix_dest_array);
14013         if (!resource)
14014                 return 0;
14015         MLX5_ASSERT(resource->action);
14016         return mlx5_list_unregister(priv->sh->dest_array_list,
14017                                     &resource->entry);
14018 }
14019
14020 static void
14021 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14022 {
14023         struct mlx5_priv *priv = dev->data->dev_private;
14024         struct mlx5_dev_ctx_shared *sh = priv->sh;
14025         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14026                                 sh->geneve_tlv_option_resource;
14027         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14028         if (geneve_opt_resource) {
14029                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14030                                          __ATOMIC_RELAXED))) {
14031                         claim_zero(mlx5_devx_cmd_destroy
14032                                         (geneve_opt_resource->obj));
14033                         mlx5_free(sh->geneve_tlv_option_resource);
14034                         sh->geneve_tlv_option_resource = NULL;
14035                 }
14036         }
14037         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14038 }
14039
14040 /**
14041  * Remove the flow from the NIC but keeps it in memory.
14042  * Lock free, (mutex should be acquired by caller).
14043  *
14044  * @param[in] dev
14045  *   Pointer to Ethernet device.
14046  * @param[in, out] flow
14047  *   Pointer to flow structure.
14048  */
14049 static void
14050 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14051 {
14052         struct mlx5_flow_handle *dh;
14053         uint32_t handle_idx;
14054         struct mlx5_priv *priv = dev->data->dev_private;
14055
14056         if (!flow)
14057                 return;
14058         handle_idx = flow->dev_handles;
14059         while (handle_idx) {
14060                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14061                                     handle_idx);
14062                 if (!dh)
14063                         return;
14064                 if (dh->drv_flow) {
14065                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14066                         dh->drv_flow = NULL;
14067                 }
14068                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14069                         flow_dv_fate_resource_release(dev, dh);
14070                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14071                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14072                 handle_idx = dh->next.next;
14073         }
14074 }
14075
14076 /**
14077  * Remove the flow from the NIC and the memory.
14078  * Lock free, (mutex should be acquired by caller).
14079  *
14080  * @param[in] dev
14081  *   Pointer to the Ethernet device structure.
14082  * @param[in, out] flow
14083  *   Pointer to flow structure.
14084  */
14085 static void
14086 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14087 {
14088         struct mlx5_flow_handle *dev_handle;
14089         struct mlx5_priv *priv = dev->data->dev_private;
14090         struct mlx5_flow_meter_info *fm = NULL;
14091         uint32_t srss = 0;
14092
14093         if (!flow)
14094                 return;
14095         flow_dv_remove(dev, flow);
14096         if (flow->counter) {
14097                 flow_dv_counter_free(dev, flow->counter);
14098                 flow->counter = 0;
14099         }
14100         if (flow->meter) {
14101                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14102                 if (fm)
14103                         mlx5_flow_meter_detach(priv, fm);
14104                 flow->meter = 0;
14105         }
14106         /* Keep the current age handling by default. */
14107         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14108                 flow_dv_aso_ct_release(dev, flow->ct);
14109         else if (flow->age)
14110                 flow_dv_aso_age_release(dev, flow->age);
14111         if (flow->geneve_tlv_option) {
14112                 flow_dv_geneve_tlv_option_resource_release(dev);
14113                 flow->geneve_tlv_option = 0;
14114         }
14115         while (flow->dev_handles) {
14116                 uint32_t tmp_idx = flow->dev_handles;
14117
14118                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14119                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14120                 if (!dev_handle)
14121                         return;
14122                 flow->dev_handles = dev_handle->next.next;
14123                 if (dev_handle->dvh.matcher)
14124                         flow_dv_matcher_release(dev, dev_handle);
14125                 if (dev_handle->dvh.rix_sample)
14126                         flow_dv_sample_resource_release(dev, dev_handle);
14127                 if (dev_handle->dvh.rix_dest_array)
14128                         flow_dv_dest_array_resource_release(dev, dev_handle);
14129                 if (dev_handle->dvh.rix_encap_decap)
14130                         flow_dv_encap_decap_resource_release(dev,
14131                                 dev_handle->dvh.rix_encap_decap);
14132                 if (dev_handle->dvh.modify_hdr)
14133                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14134                 if (dev_handle->dvh.rix_push_vlan)
14135                         flow_dv_push_vlan_action_resource_release(dev,
14136                                                                   dev_handle);
14137                 if (dev_handle->dvh.rix_tag)
14138                         flow_dv_tag_release(dev,
14139                                             dev_handle->dvh.rix_tag);
14140                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14141                         flow_dv_fate_resource_release(dev, dev_handle);
14142                 else if (!srss)
14143                         srss = dev_handle->rix_srss;
14144                 if (fm && dev_handle->is_meter_flow_id &&
14145                     dev_handle->split_flow_id)
14146                         mlx5_ipool_free(fm->flow_ipool,
14147                                         dev_handle->split_flow_id);
14148                 else if (dev_handle->split_flow_id &&
14149                     !dev_handle->is_meter_flow_id)
14150                         mlx5_ipool_free(priv->sh->ipool
14151                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14152                                         dev_handle->split_flow_id);
14153                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14154                            tmp_idx);
14155         }
14156         if (srss)
14157                 flow_dv_shared_rss_action_release(dev, srss);
14158 }
14159
14160 /**
14161  * Release array of hash RX queue objects.
14162  * Helper function.
14163  *
14164  * @param[in] dev
14165  *   Pointer to the Ethernet device structure.
14166  * @param[in, out] hrxqs
14167  *   Array of hash RX queue objects.
14168  *
14169  * @return
14170  *   Total number of references to hash RX queue objects in *hrxqs* array
14171  *   after this operation.
14172  */
14173 static int
14174 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14175                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14176 {
14177         size_t i;
14178         int remaining = 0;
14179
14180         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14181                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14182
14183                 if (!ret)
14184                         (*hrxqs)[i] = 0;
14185                 remaining += ret;
14186         }
14187         return remaining;
14188 }
14189
14190 /**
14191  * Release all hash RX queue objects representing shared RSS action.
14192  *
14193  * @param[in] dev
14194  *   Pointer to the Ethernet device structure.
14195  * @param[in, out] action
14196  *   Shared RSS action to remove hash RX queue objects from.
14197  *
14198  * @return
14199  *   Total number of references to hash RX queue objects stored in *action*
14200  *   after this operation.
14201  *   Expected to be 0 if no external references held.
14202  */
14203 static int
14204 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14205                                  struct mlx5_shared_action_rss *shared_rss)
14206 {
14207         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14208 }
14209
14210 /**
14211  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
14212  * user input.
14213  *
14214  * Only one hash value is available for one L3+L4 combination:
14215  * for example:
14216  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14217  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
14218  * same slot in mlx5_rss_hash_fields.
14219  *
14220  * @param[in] rss
14221  *   Pointer to the shared action RSS conf.
14222  * @param[in, out] hash_field
14223  *   hash_field variable needed to be adjusted.
14224  *
14225  * @return
14226  *   void
14227  */
14228 static void
14229 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14230                                      uint64_t *hash_field)
14231 {
14232         uint64_t rss_types = rss->origin.types;
14233
14234         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14235         case MLX5_RSS_HASH_IPV4:
14236                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14237                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14238                         if (rss_types & ETH_RSS_L3_DST_ONLY)
14239                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14240                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
14241                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14242                         else
14243                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14244                 }
14245                 return;
14246         case MLX5_RSS_HASH_IPV6:
14247                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14248                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14249                         if (rss_types & ETH_RSS_L3_DST_ONLY)
14250                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14251                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
14252                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14253                         else
14254                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14255                 }
14256                 return;
14257         case MLX5_RSS_HASH_IPV4_UDP:
14258                 /* fall-through. */
14259         case MLX5_RSS_HASH_IPV6_UDP:
14260                 if (rss_types & ETH_RSS_UDP) {
14261                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14262                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14263                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14264                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14265                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14266                         else
14267                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14268                 }
14269                 return;
14270         case MLX5_RSS_HASH_IPV4_TCP:
14271                 /* fall-through. */
14272         case MLX5_RSS_HASH_IPV6_TCP:
14273                 if (rss_types & ETH_RSS_TCP) {
14274                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14275                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14276                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14277                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14278                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14279                         else
14280                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14281                 }
14282                 return;
14283         default:
14284                 return;
14285         }
14286 }
14287
14288 /**
14289  * Setup shared RSS action.
14290  * Prepare set of hash RX queue objects sufficient to handle all valid
14291  * hash_fields combinations (see enum ibv_rx_hash_fields).
14292  *
14293  * @param[in] dev
14294  *   Pointer to the Ethernet device structure.
14295  * @param[in] action_idx
14296  *   Shared RSS action ipool index.
14297  * @param[in, out] action
14298  *   Partially initialized shared RSS action.
14299  * @param[out] error
14300  *   Perform verbose error reporting if not NULL. Initialized in case of
14301  *   error only.
14302  *
14303  * @return
14304  *   0 on success, otherwise negative errno value.
14305  */
14306 static int
14307 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14308                            uint32_t action_idx,
14309                            struct mlx5_shared_action_rss *shared_rss,
14310                            struct rte_flow_error *error)
14311 {
14312         struct mlx5_flow_rss_desc rss_desc = { 0 };
14313         size_t i;
14314         int err;
14315
14316         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14317                 return rte_flow_error_set(error, rte_errno,
14318                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14319                                           "cannot setup indirection table");
14320         }
14321         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14322         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14323         rss_desc.const_q = shared_rss->origin.queue;
14324         rss_desc.queue_num = shared_rss->origin.queue_num;
14325         /* Set non-zero value to indicate a shared RSS. */
14326         rss_desc.shared_rss = action_idx;
14327         rss_desc.ind_tbl = shared_rss->ind_tbl;
14328         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14329                 uint32_t hrxq_idx;
14330                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14331                 int tunnel = 0;
14332
14333                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14334                 if (shared_rss->origin.level > 1) {
14335                         hash_fields |= IBV_RX_HASH_INNER;
14336                         tunnel = 1;
14337                 }
14338                 rss_desc.tunnel = tunnel;
14339                 rss_desc.hash_fields = hash_fields;
14340                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14341                 if (!hrxq_idx) {
14342                         rte_flow_error_set
14343                                 (error, rte_errno,
14344                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14345                                  "cannot get hash queue");
14346                         goto error_hrxq_new;
14347                 }
14348                 err = __flow_dv_action_rss_hrxq_set
14349                         (shared_rss, hash_fields, hrxq_idx);
14350                 MLX5_ASSERT(!err);
14351         }
14352         return 0;
14353 error_hrxq_new:
14354         err = rte_errno;
14355         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14356         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14357                 shared_rss->ind_tbl = NULL;
14358         rte_errno = err;
14359         return -rte_errno;
14360 }
14361
14362 /**
14363  * Create shared RSS action.
14364  *
14365  * @param[in] dev
14366  *   Pointer to the Ethernet device structure.
14367  * @param[in] conf
14368  *   Shared action configuration.
14369  * @param[in] rss
14370  *   RSS action specification used to create shared action.
14371  * @param[out] error
14372  *   Perform verbose error reporting if not NULL. Initialized in case of
14373  *   error only.
14374  *
14375  * @return
14376  *   A valid shared action ID in case of success, 0 otherwise and
14377  *   rte_errno is set.
14378  */
14379 static uint32_t
14380 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14381                             const struct rte_flow_indir_action_conf *conf,
14382                             const struct rte_flow_action_rss *rss,
14383                             struct rte_flow_error *error)
14384 {
14385         struct mlx5_priv *priv = dev->data->dev_private;
14386         struct mlx5_shared_action_rss *shared_rss = NULL;
14387         void *queue = NULL;
14388         struct rte_flow_action_rss *origin;
14389         const uint8_t *rss_key;
14390         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14391         uint32_t idx;
14392
14393         RTE_SET_USED(conf);
14394         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14395                             0, SOCKET_ID_ANY);
14396         shared_rss = mlx5_ipool_zmalloc
14397                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14398         if (!shared_rss || !queue) {
14399                 rte_flow_error_set(error, ENOMEM,
14400                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14401                                    "cannot allocate resource memory");
14402                 goto error_rss_init;
14403         }
14404         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14405                 rte_flow_error_set(error, E2BIG,
14406                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14407                                    "rss action number out of range");
14408                 goto error_rss_init;
14409         }
14410         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14411                                           sizeof(*shared_rss->ind_tbl),
14412                                           0, SOCKET_ID_ANY);
14413         if (!shared_rss->ind_tbl) {
14414                 rte_flow_error_set(error, ENOMEM,
14415                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14416                                    "cannot allocate resource memory");
14417                 goto error_rss_init;
14418         }
14419         memcpy(queue, rss->queue, queue_size);
14420         shared_rss->ind_tbl->queues = queue;
14421         shared_rss->ind_tbl->queues_n = rss->queue_num;
14422         origin = &shared_rss->origin;
14423         origin->func = rss->func;
14424         origin->level = rss->level;
14425         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14426         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14427         /* NULL RSS key indicates default RSS key. */
14428         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14429         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14430         origin->key = &shared_rss->key[0];
14431         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14432         origin->queue = queue;
14433         origin->queue_num = rss->queue_num;
14434         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14435                 goto error_rss_init;
14436         rte_spinlock_init(&shared_rss->action_rss_sl);
14437         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14438         rte_spinlock_lock(&priv->shared_act_sl);
14439         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14440                      &priv->rss_shared_actions, idx, shared_rss, next);
14441         rte_spinlock_unlock(&priv->shared_act_sl);
14442         return idx;
14443 error_rss_init:
14444         if (shared_rss) {
14445                 if (shared_rss->ind_tbl)
14446                         mlx5_free(shared_rss->ind_tbl);
14447                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14448                                 idx);
14449         }
14450         if (queue)
14451                 mlx5_free(queue);
14452         return 0;
14453 }
14454
14455 /**
14456  * Destroy the shared RSS action.
14457  * Release related hash RX queue objects.
14458  *
14459  * @param[in] dev
14460  *   Pointer to the Ethernet device structure.
14461  * @param[in] idx
14462  *   The shared RSS action object ID to be removed.
14463  * @param[out] error
14464  *   Perform verbose error reporting if not NULL. Initialized in case of
14465  *   error only.
14466  *
14467  * @return
14468  *   0 on success, otherwise negative errno value.
14469  */
14470 static int
14471 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14472                              struct rte_flow_error *error)
14473 {
14474         struct mlx5_priv *priv = dev->data->dev_private;
14475         struct mlx5_shared_action_rss *shared_rss =
14476             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14477         uint32_t old_refcnt = 1;
14478         int remaining;
14479         uint16_t *queue = NULL;
14480
14481         if (!shared_rss)
14482                 return rte_flow_error_set(error, EINVAL,
14483                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14484                                           "invalid shared action");
14485         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14486         if (remaining)
14487                 return rte_flow_error_set(error, EBUSY,
14488                                           RTE_FLOW_ERROR_TYPE_ACTION,
14489                                           NULL,
14490                                           "shared rss hrxq has references");
14491         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14492                                          0, 0, __ATOMIC_ACQUIRE,
14493                                          __ATOMIC_RELAXED))
14494                 return rte_flow_error_set(error, EBUSY,
14495                                           RTE_FLOW_ERROR_TYPE_ACTION,
14496                                           NULL,
14497                                           "shared rss has references");
14498         queue = shared_rss->ind_tbl->queues;
14499         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14500         if (remaining)
14501                 return rte_flow_error_set(error, EBUSY,
14502                                           RTE_FLOW_ERROR_TYPE_ACTION,
14503                                           NULL,
14504                                           "shared rss indirection table has"
14505                                           " references");
14506         mlx5_free(queue);
14507         rte_spinlock_lock(&priv->shared_act_sl);
14508         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14509                      &priv->rss_shared_actions, idx, shared_rss, next);
14510         rte_spinlock_unlock(&priv->shared_act_sl);
14511         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14512                         idx);
14513         return 0;
14514 }
14515
14516 /**
14517  * Create indirect action, lock free,
14518  * (mutex should be acquired by caller).
14519  * Dispatcher for action type specific call.
14520  *
14521  * @param[in] dev
14522  *   Pointer to the Ethernet device structure.
14523  * @param[in] conf
14524  *   Shared action configuration.
14525  * @param[in] action
14526  *   Action specification used to create indirect action.
14527  * @param[out] error
14528  *   Perform verbose error reporting if not NULL. Initialized in case of
14529  *   error only.
14530  *
14531  * @return
14532  *   A valid shared action handle in case of success, NULL otherwise and
14533  *   rte_errno is set.
14534  */
14535 static struct rte_flow_action_handle *
14536 flow_dv_action_create(struct rte_eth_dev *dev,
14537                       const struct rte_flow_indir_action_conf *conf,
14538                       const struct rte_flow_action *action,
14539                       struct rte_flow_error *err)
14540 {
14541         struct mlx5_priv *priv = dev->data->dev_private;
14542         uint32_t age_idx = 0;
14543         uint32_t idx = 0;
14544         uint32_t ret = 0;
14545
14546         switch (action->type) {
14547         case RTE_FLOW_ACTION_TYPE_RSS:
14548                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14549                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14550                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14551                 break;
14552         case RTE_FLOW_ACTION_TYPE_AGE:
14553                 age_idx = flow_dv_aso_age_alloc(dev, err);
14554                 if (!age_idx) {
14555                         ret = -rte_errno;
14556                         break;
14557                 }
14558                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14559                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14560                 flow_dv_aso_age_params_init(dev, age_idx,
14561                                         ((const struct rte_flow_action_age *)
14562                                                 action->conf)->context ?
14563                                         ((const struct rte_flow_action_age *)
14564                                                 action->conf)->context :
14565                                         (void *)(uintptr_t)idx,
14566                                         ((const struct rte_flow_action_age *)
14567                                                 action->conf)->timeout);
14568                 ret = age_idx;
14569                 break;
14570         case RTE_FLOW_ACTION_TYPE_COUNT:
14571                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14572                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14573                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14574                 break;
14575         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14576                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14577                                                          err);
14578                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14579                 break;
14580         default:
14581                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14582                                    NULL, "action type not supported");
14583                 break;
14584         }
14585         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14586 }
14587
14588 /**
14589  * Destroy the indirect action.
14590  * Release action related resources on the NIC and the memory.
14591  * Lock free, (mutex should be acquired by caller).
14592  * Dispatcher for action type specific call.
14593  *
14594  * @param[in] dev
14595  *   Pointer to the Ethernet device structure.
14596  * @param[in] handle
14597  *   The indirect action object handle to be removed.
14598  * @param[out] error
14599  *   Perform verbose error reporting if not NULL. Initialized in case of
14600  *   error only.
14601  *
14602  * @return
14603  *   0 on success, otherwise negative errno value.
14604  */
14605 static int
14606 flow_dv_action_destroy(struct rte_eth_dev *dev,
14607                        struct rte_flow_action_handle *handle,
14608                        struct rte_flow_error *error)
14609 {
14610         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14611         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14612         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14613         struct mlx5_flow_counter *cnt;
14614         uint32_t no_flow_refcnt = 1;
14615         int ret;
14616
14617         switch (type) {
14618         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14619                 return __flow_dv_action_rss_release(dev, idx, error);
14620         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14621                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14622                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14623                                                  &no_flow_refcnt, 1, false,
14624                                                  __ATOMIC_ACQUIRE,
14625                                                  __ATOMIC_RELAXED))
14626                         return rte_flow_error_set(error, EBUSY,
14627                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14628                                                   NULL,
14629                                                   "Indirect count action has references");
14630                 flow_dv_counter_free(dev, idx);
14631                 return 0;
14632         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14633                 ret = flow_dv_aso_age_release(dev, idx);
14634                 if (ret)
14635                         /*
14636                          * In this case, the last flow has a reference will
14637                          * actually release the age action.
14638                          */
14639                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14640                                 " released with references %d.", idx, ret);
14641                 return 0;
14642         case MLX5_INDIRECT_ACTION_TYPE_CT:
14643                 ret = flow_dv_aso_ct_release(dev, idx);
14644                 if (ret < 0)
14645                         return ret;
14646                 if (ret > 0)
14647                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14648                                 "has references %d.", idx, ret);
14649                 return 0;
14650         default:
14651                 return rte_flow_error_set(error, ENOTSUP,
14652                                           RTE_FLOW_ERROR_TYPE_ACTION,
14653                                           NULL,
14654                                           "action type not supported");
14655         }
14656 }
14657
/**
 * Updates in place shared RSS action configuration.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   The shared RSS action object ID to be updated.
 * @param[in] action_conf
 *   RSS action specification used to modify *shared_rss*.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 * @note: currently only support update of RSS queues.
 */
static int
__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
			    const struct rte_flow_action_rss *action_conf,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	int ret = 0;
	void *queue = NULL;
	uint16_t *queue_old = NULL;
	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action to update");
	if (priv->obj_ops.ind_table_modify == NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot modify indirection table");
	/*
	 * Private copy of the new queue list; on success its ownership is
	 * transferred to the indirection table.
	 */
	queue = mlx5_malloc(MLX5_MEM_ZERO,
			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	if (!queue)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
	memcpy(queue, action_conf->queue, queue_size);
	MLX5_ASSERT(shared_rss->ind_tbl);
	/* Serialize concurrent updates of the same shared RSS action. */
	rte_spinlock_lock(&shared_rss->action_rss_sl);
	queue_old = shared_rss->ind_tbl->queues;
	ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
					queue, action_conf->queue_num, true);
	if (ret) {
		/* Modify failed: drop the new copy, keep the old list. */
		mlx5_free(queue);
		ret = rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot update indirection table");
	} else {
		/* New list installed: release the replaced one. */
		mlx5_free(queue_old);
		shared_rss->origin.queue = queue;
		shared_rss->origin.queue_num = action_conf->queue_num;
	}
	rte_spinlock_unlock(&shared_rss->action_rss_sl);
	return ret;
}
14723
14724 /*
14725  * Updates in place conntrack context or direction.
14726  * Context update should be synchronized.
14727  *
14728  * @param[in] dev
14729  *   Pointer to the Ethernet device structure.
14730  * @param[in] idx
14731  *   The conntrack object ID to be updated.
14732  * @param[in] update
14733  *   Pointer to the structure of information to update.
14734  * @param[out] error
14735  *   Perform verbose error reporting if not NULL. Initialized in case of
14736  *   error only.
14737  *
14738  * @return
14739  *   0 on success, otherwise negative errno value.
14740  */
14741 static int
14742 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14743                            const struct rte_flow_modify_conntrack *update,
14744                            struct rte_flow_error *error)
14745 {
14746         struct mlx5_priv *priv = dev->data->dev_private;
14747         struct mlx5_aso_ct_action *ct;
14748         const struct rte_flow_action_conntrack *new_prf;
14749         int ret = 0;
14750         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14751         uint32_t dev_idx;
14752
14753         if (PORT_ID(priv) != owner)
14754                 return rte_flow_error_set(error, EACCES,
14755                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14756                                           NULL,
14757                                           "CT object owned by another port");
14758         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14759         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14760         if (!ct->refcnt)
14761                 return rte_flow_error_set(error, ENOMEM,
14762                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14763                                           NULL,
14764                                           "CT object is inactive");
14765         new_prf = &update->new_ct;
14766         if (update->direction)
14767                 ct->is_original = !!new_prf->is_original_dir;
14768         if (update->state) {
14769                 /* Only validate the profile when it needs to be updated. */
14770                 ret = mlx5_validate_action_ct(dev, new_prf, error);
14771                 if (ret)
14772                         return ret;
14773                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14774                 if (ret)
14775                         return rte_flow_error_set(error, EIO,
14776                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14777                                         NULL,
14778                                         "Failed to send CT context update WQE");
14779                 /* Block until ready or a failure. */
14780                 ret = mlx5_aso_ct_available(priv->sh, ct);
14781                 if (ret)
14782                         rte_flow_error_set(error, rte_errno,
14783                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14784                                            NULL,
14785                                            "Timeout to get the CT update");
14786         }
14787         return ret;
14788 }
14789
14790 /**
14791  * Updates in place shared action configuration, lock free,
14792  * (mutex should be acquired by caller).
14793  *
14794  * @param[in] dev
14795  *   Pointer to the Ethernet device structure.
14796  * @param[in] handle
14797  *   The indirect action object handle to be updated.
14798  * @param[in] update
14799  *   Action specification used to modify the action pointed by *handle*.
14800  *   *update* could be of same type with the action pointed by the *handle*
14801  *   handle argument, or some other structures like a wrapper, depending on
14802  *   the indirect action type.
14803  * @param[out] error
14804  *   Perform verbose error reporting if not NULL. Initialized in case of
14805  *   error only.
14806  *
14807  * @return
14808  *   0 on success, otherwise negative errno value.
14809  */
14810 static int
14811 flow_dv_action_update(struct rte_eth_dev *dev,
14812                         struct rte_flow_action_handle *handle,
14813                         const void *update,
14814                         struct rte_flow_error *err)
14815 {
14816         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14817         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14818         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14819         const void *action_conf;
14820
14821         switch (type) {
14822         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14823                 action_conf = ((const struct rte_flow_action *)update)->conf;
14824                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14825         case MLX5_INDIRECT_ACTION_TYPE_CT:
14826                 return __flow_dv_action_ct_update(dev, idx, update, err);
14827         default:
14828                 return rte_flow_error_set(err, ENOTSUP,
14829                                           RTE_FLOW_ERROR_TYPE_ACTION,
14830                                           NULL,
14831                                           "action type update not supported");
14832         }
14833 }
14834
/**
 * Destroy the meter sub policy table rules.
 * Lock free, (mutex should be acquired by caller).
 *
 * Removes every color rule of the sub policy, releases the per-color
 * hash RX queues / jump tables and finally the table resource itself.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] sub_policy
 *   Pointer to meter sub policy table.
 */
static void
__flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
			     struct mlx5_flow_meter_sub_policy *sub_policy)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_tbl_data_entry *tbl;
	struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
	struct mlx5_flow_meter_info *next_fm;
	struct mlx5_sub_policy_color_rule *color_rule;
	void *tmp;
	uint32_t i;

	for (i = 0; i < RTE_COLORS; i++) {
		next_fm = NULL;
		/*
		 * A GREEN fate of MLX5_FLOW_FATE_MTR chains to another meter;
		 * look it up so each destroyed rule can drop its reference.
		 */
		if (i == RTE_COLOR_GREEN && policy &&
		    policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
			next_fm = mlx5_flow_meter_find(priv,
					policy->act_cnt[i].next_mtr_id, NULL);
		/* SAFE variant: entries are unlinked while iterating. */
		TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
				   next_port, tmp) {
			claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
			tbl = container_of(color_rule->matcher->tbl,
					typeof(*tbl), tbl);
			mlx5_list_unregister(tbl->matchers,
						&color_rule->matcher->entry);
			TAILQ_REMOVE(&sub_policy->color_rules[i],
					color_rule, next_port);
			mlx5_free(color_rule);
			if (next_fm)
				mlx5_flow_meter_detach(priv, next_fm);
		}
	}
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (sub_policy->rix_hrxq[i]) {
			/*
			 * Hierarchical policies do not own the hrxq here;
			 * only clear the index in that case.
			 */
			if (policy && !policy->is_hierarchy)
				mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
			sub_policy->rix_hrxq[i] = 0;
		}
		if (sub_policy->jump_tbl[i]) {
			flow_dv_tbl_resource_release(MLX5_SH(dev),
			sub_policy->jump_tbl[i]);
			sub_policy->jump_tbl[i] = NULL;
		}
	}
	/* Release the sub policy table resource last. */
	if (sub_policy->tbl_rsc) {
		flow_dv_tbl_resource_release(MLX5_SH(dev),
			sub_policy->tbl_rsc);
		sub_policy->tbl_rsc = NULL;
	}
}
14894
14895 /**
14896  * Destroy policy rules, lock free,
14897  * (mutex should be acquired by caller).
14898  * Dispatcher for action type specific call.
14899  *
14900  * @param[in] dev
14901  *   Pointer to the Ethernet device structure.
14902  * @param[in] mtr_policy
14903  *   Meter policy struct.
14904  */
14905 static void
14906 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
14907                       struct mlx5_flow_meter_policy *mtr_policy)
14908 {
14909         uint32_t i, j;
14910         struct mlx5_flow_meter_sub_policy *sub_policy;
14911         uint16_t sub_policy_num;
14912
14913         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14914                 sub_policy_num = (mtr_policy->sub_policy_num >>
14915                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14916                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14917                 for (j = 0; j < sub_policy_num; j++) {
14918                         sub_policy = mtr_policy->sub_policys[i][j];
14919                         if (sub_policy)
14920                                 __flow_dv_destroy_sub_policy_rules
14921                                                 (dev, sub_policy);
14922                 }
14923         }
14924 }
14925
14926 /**
14927  * Destroy policy action, lock free,
14928  * (mutex should be acquired by caller).
14929  * Dispatcher for action type specific call.
14930  *
14931  * @param[in] dev
14932  *   Pointer to the Ethernet device structure.
14933  * @param[in] mtr_policy
14934  *   Meter policy struct.
14935  */
14936 static void
14937 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
14938                       struct mlx5_flow_meter_policy *mtr_policy)
14939 {
14940         struct rte_flow_action *rss_action;
14941         struct mlx5_flow_handle dev_handle;
14942         uint32_t i, j;
14943
14944         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14945                 if (mtr_policy->act_cnt[i].rix_mark) {
14946                         flow_dv_tag_release(dev,
14947                                 mtr_policy->act_cnt[i].rix_mark);
14948                         mtr_policy->act_cnt[i].rix_mark = 0;
14949                 }
14950                 if (mtr_policy->act_cnt[i].modify_hdr) {
14951                         dev_handle.dvh.modify_hdr =
14952                                 mtr_policy->act_cnt[i].modify_hdr;
14953                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
14954                 }
14955                 switch (mtr_policy->act_cnt[i].fate_action) {
14956                 case MLX5_FLOW_FATE_SHARED_RSS:
14957                         rss_action = mtr_policy->act_cnt[i].rss;
14958                         mlx5_free(rss_action);
14959                         break;
14960                 case MLX5_FLOW_FATE_PORT_ID:
14961                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
14962                                 flow_dv_port_id_action_resource_release(dev,
14963                                 mtr_policy->act_cnt[i].rix_port_id_action);
14964                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
14965                         }
14966                         break;
14967                 case MLX5_FLOW_FATE_DROP:
14968                 case MLX5_FLOW_FATE_JUMP:
14969                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14970                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
14971                                                 NULL;
14972                         break;
14973                 default:
14974                         /*Queue action do nothing*/
14975                         break;
14976                 }
14977         }
14978         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14979                 mtr_policy->dr_drop_action[j] = NULL;
14980 }
14981
14982 /**
14983  * Create policy action per domain, lock free,
14984  * (mutex should be acquired by caller).
14985  * Dispatcher for action type specific call.
14986  *
14987  * @param[in] dev
14988  *   Pointer to the Ethernet device structure.
14989  * @param[in] mtr_policy
14990  *   Meter policy struct.
14991  * @param[in] action
14992  *   Action specification used to create meter actions.
14993  * @param[out] error
14994  *   Perform verbose error reporting if not NULL. Initialized in case of
14995  *   error only.
14996  *
14997  * @return
14998  *   0 on success, otherwise negative errno value.
14999  */
15000 static int
15001 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15002                         struct mlx5_flow_meter_policy *mtr_policy,
15003                         const struct rte_flow_action *actions[RTE_COLORS],
15004                         enum mlx5_meter_domain domain,
15005                         struct rte_mtr_error *error)
15006 {
15007         struct mlx5_priv *priv = dev->data->dev_private;
15008         struct rte_flow_error flow_err;
15009         const struct rte_flow_action *act;
15010         uint64_t action_flags = 0;
15011         struct mlx5_flow_handle dh;
15012         struct mlx5_flow dev_flow;
15013         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15014         int i, ret;
15015         uint8_t egress, transfer;
15016         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15017         union {
15018                 struct mlx5_flow_dv_modify_hdr_resource res;
15019                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15020                             sizeof(struct mlx5_modification_cmd) *
15021                             (MLX5_MAX_MODIFY_NUM + 1)];
15022         } mhdr_dummy;
15023         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15024
15025         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15026         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15027         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15028         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15029         memset(&port_id_action, 0,
15030                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
15031         memset(mhdr_res, 0, sizeof(*mhdr_res));
15032         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15033                                         egress ?
15034                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15035                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
15036         dev_flow.handle = &dh;
15037         dev_flow.dv.port_id_action = &port_id_action;
15038         dev_flow.external = true;
15039         for (i = 0; i < RTE_COLORS; i++) {
15040                 if (i < MLX5_MTR_RTE_COLORS)
15041                         act_cnt = &mtr_policy->act_cnt[i];
15042                 for (act = actions[i];
15043                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
15044                         act++) {
15045                         switch (act->type) {
15046                         case RTE_FLOW_ACTION_TYPE_MARK:
15047                         {
15048                                 uint32_t tag_be = mlx5_flow_mark_set
15049                                         (((const struct rte_flow_action_mark *)
15050                                         (act->conf))->id);
15051
15052                                 if (i >= MLX5_MTR_RTE_COLORS)
15053                                         return -rte_mtr_error_set(error,
15054                                           ENOTSUP,
15055                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15056                                           NULL,
15057                                           "cannot create policy "
15058                                           "mark action for this color");
15059                                 dev_flow.handle->mark = 1;
15060                                 if (flow_dv_tag_resource_register(dev, tag_be,
15061                                                   &dev_flow, &flow_err))
15062                                         return -rte_mtr_error_set(error,
15063                                         ENOTSUP,
15064                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15065                                         NULL,
15066                                         "cannot setup policy mark action");
15067                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15068                                 act_cnt->rix_mark =
15069                                         dev_flow.handle->dvh.rix_tag;
15070                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15071                                 break;
15072                         }
15073                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15074                                 if (i >= MLX5_MTR_RTE_COLORS)
15075                                         return -rte_mtr_error_set(error,
15076                                           ENOTSUP,
15077                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15078                                           NULL,
15079                                           "cannot create policy "
15080                                           "set tag action for this color");
15081                                 if (flow_dv_convert_action_set_tag
15082                                 (dev, mhdr_res,
15083                                 (const struct rte_flow_action_set_tag *)
15084                                 act->conf,  &flow_err))
15085                                         return -rte_mtr_error_set(error,
15086                                         ENOTSUP,
15087                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15088                                         NULL, "cannot convert policy "
15089                                         "set tag action");
15090                                 if (!mhdr_res->actions_num)
15091                                         return -rte_mtr_error_set(error,
15092                                         ENOTSUP,
15093                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15094                                         NULL, "cannot find policy "
15095                                         "set tag action");
15096                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15097                                 break;
15098                         case RTE_FLOW_ACTION_TYPE_DROP:
15099                         {
15100                                 struct mlx5_flow_mtr_mng *mtrmng =
15101                                                 priv->sh->mtrmng;
15102                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15103
15104                                 /*
15105                                  * Create the drop table with
15106                                  * METER DROP level.
15107                                  */
15108                                 if (!mtrmng->drop_tbl[domain]) {
15109                                         mtrmng->drop_tbl[domain] =
15110                                         flow_dv_tbl_resource_get(dev,
15111                                         MLX5_FLOW_TABLE_LEVEL_METER,
15112                                         egress, transfer, false, NULL, 0,
15113                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15114                                         if (!mtrmng->drop_tbl[domain])
15115                                                 return -rte_mtr_error_set
15116                                         (error, ENOTSUP,
15117                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15118                                         NULL,
15119                                         "Failed to create meter drop table");
15120                                 }
15121                                 tbl_data = container_of
15122                                 (mtrmng->drop_tbl[domain],
15123                                 struct mlx5_flow_tbl_data_entry, tbl);
15124                                 if (i < MLX5_MTR_RTE_COLORS) {
15125                                         act_cnt->dr_jump_action[domain] =
15126                                                 tbl_data->jump.action;
15127                                         act_cnt->fate_action =
15128                                                 MLX5_FLOW_FATE_DROP;
15129                                 }
15130                                 if (i == RTE_COLOR_RED)
15131                                         mtr_policy->dr_drop_action[domain] =
15132                                                 tbl_data->jump.action;
15133                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15134                                 break;
15135                         }
15136                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15137                         {
15138                                 if (i >= MLX5_MTR_RTE_COLORS)
15139                                         return -rte_mtr_error_set(error,
15140                                         ENOTSUP,
15141                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15142                                         NULL, "cannot create policy "
15143                                         "fate queue for this color");
15144                                 act_cnt->queue =
15145                                 ((const struct rte_flow_action_queue *)
15146                                         (act->conf))->index;
15147                                 act_cnt->fate_action =
15148                                         MLX5_FLOW_FATE_QUEUE;
15149                                 dev_flow.handle->fate_action =
15150                                         MLX5_FLOW_FATE_QUEUE;
15151                                 mtr_policy->is_queue = 1;
15152                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15153                                 break;
15154                         }
15155                         case RTE_FLOW_ACTION_TYPE_RSS:
15156                         {
15157                                 int rss_size;
15158
15159                                 if (i >= MLX5_MTR_RTE_COLORS)
15160                                         return -rte_mtr_error_set(error,
15161                                           ENOTSUP,
15162                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15163                                           NULL,
15164                                           "cannot create policy "
15165                                           "rss action for this color");
15166                                 /*
15167                                  * Save RSS conf into policy struct
15168                                  * for translate stage.
15169                                  */
15170                                 rss_size = (int)rte_flow_conv
15171                                         (RTE_FLOW_CONV_OP_ACTION,
15172                                         NULL, 0, act, &flow_err);
15173                                 if (rss_size <= 0)
15174                                         return -rte_mtr_error_set(error,
15175                                           ENOTSUP,
15176                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15177                                           NULL, "Get the wrong "
15178                                           "rss action struct size");
15179                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15180                                                 rss_size, 0, SOCKET_ID_ANY);
15181                                 if (!act_cnt->rss)
15182                                         return -rte_mtr_error_set(error,
15183                                           ENOTSUP,
15184                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15185                                           NULL,
15186                                           "Fail to malloc rss action memory");
15187                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15188                                         act_cnt->rss, rss_size,
15189                                         act, &flow_err);
15190                                 if (ret < 0)
15191                                         return -rte_mtr_error_set(error,
15192                                           ENOTSUP,
15193                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15194                                           NULL, "Fail to save "
15195                                           "rss action into policy struct");
15196                                 act_cnt->fate_action =
15197                                         MLX5_FLOW_FATE_SHARED_RSS;
15198                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15199                                 break;
15200                         }
15201                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15202                         {
15203                                 struct mlx5_flow_dv_port_id_action_resource
15204                                         port_id_resource;
15205                                 uint32_t port_id = 0;
15206
15207                                 if (i >= MLX5_MTR_RTE_COLORS)
15208                                         return -rte_mtr_error_set(error,
15209                                         ENOTSUP,
15210                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15211                                         NULL, "cannot create policy "
15212                                         "port action for this color");
15213                                 memset(&port_id_resource, 0,
15214                                         sizeof(port_id_resource));
15215                                 if (flow_dv_translate_action_port_id(dev, act,
15216                                                 &port_id, &flow_err))
15217                                         return -rte_mtr_error_set(error,
15218                                         ENOTSUP,
15219                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15220                                         NULL, "cannot translate "
15221                                         "policy port action");
15222                                 port_id_resource.port_id = port_id;
15223                                 if (flow_dv_port_id_action_resource_register
15224                                         (dev, &port_id_resource,
15225                                         &dev_flow, &flow_err))
15226                                         return -rte_mtr_error_set(error,
15227                                         ENOTSUP,
15228                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15229                                         NULL, "cannot setup "
15230                                         "policy port action");
15231                                 act_cnt->rix_port_id_action =
15232                                         dev_flow.handle->rix_port_id_action;
15233                                 act_cnt->fate_action =
15234                                         MLX5_FLOW_FATE_PORT_ID;
15235                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15236                                 break;
15237                         }
15238                         case RTE_FLOW_ACTION_TYPE_JUMP:
15239                         {
15240                                 uint32_t jump_group = 0;
15241                                 uint32_t table = 0;
15242                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15243                                 struct flow_grp_info grp_info = {
15244                                         .external = !!dev_flow.external,
15245                                         .transfer = !!transfer,
15246                                         .fdb_def_rule = !!priv->fdb_def_rule,
15247                                         .std_tbl_fix = 0,
15248                                         .skip_scale = dev_flow.skip_scale &
15249                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15250                                 };
15251                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15252                                 mtr_policy->sub_policys[domain][0];
15253
15254                                 if (i >= MLX5_MTR_RTE_COLORS)
15255                                         return -rte_mtr_error_set(error,
15256                                           ENOTSUP,
15257                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15258                                           NULL,
15259                                           "cannot create policy "
15260                                           "jump action for this color");
15261                                 jump_group =
15262                                 ((const struct rte_flow_action_jump *)
15263                                                         act->conf)->group;
15264                                 if (mlx5_flow_group_to_table(dev, NULL,
15265                                                        jump_group,
15266                                                        &table,
15267                                                        &grp_info, &flow_err))
15268                                         return -rte_mtr_error_set(error,
15269                                         ENOTSUP,
15270                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15271                                         NULL, "cannot setup "
15272                                         "policy jump action");
15273                                 sub_policy->jump_tbl[i] =
15274                                 flow_dv_tbl_resource_get(dev,
15275                                         table, egress,
15276                                         transfer,
15277                                         !!dev_flow.external,
15278                                         NULL, jump_group, 0,
15279                                         0, &flow_err);
15280                                 if
15281                                 (!sub_policy->jump_tbl[i])
15282                                         return  -rte_mtr_error_set(error,
15283                                         ENOTSUP,
15284                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15285                                         NULL, "cannot create jump action.");
15286                                 tbl_data = container_of
15287                                 (sub_policy->jump_tbl[i],
15288                                 struct mlx5_flow_tbl_data_entry, tbl);
15289                                 act_cnt->dr_jump_action[domain] =
15290                                         tbl_data->jump.action;
15291                                 act_cnt->fate_action =
15292                                         MLX5_FLOW_FATE_JUMP;
15293                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15294                                 break;
15295                         }
15296                         case RTE_FLOW_ACTION_TYPE_METER:
15297                         {
15298                                 const struct rte_flow_action_meter *mtr;
15299                                 struct mlx5_flow_meter_info *next_fm;
15300                                 struct mlx5_flow_meter_policy *next_policy;
15301                                 struct rte_flow_action tag_action;
15302                                 struct mlx5_rte_flow_action_set_tag set_tag;
15303                                 uint32_t next_mtr_idx = 0;
15304
15305                                 mtr = act->conf;
15306                                 next_fm = mlx5_flow_meter_find(priv,
15307                                                         mtr->mtr_id,
15308                                                         &next_mtr_idx);
15309                                 if (!next_fm)
15310                                         return -rte_mtr_error_set(error, EINVAL,
15311                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15312                                                 "Fail to find next meter.");
15313                                 if (next_fm->def_policy)
15314                                         return -rte_mtr_error_set(error, EINVAL,
15315                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15316                                 "Hierarchy only supports termination meter.");
15317                                 next_policy = mlx5_flow_meter_policy_find(dev,
15318                                                 next_fm->policy_id, NULL);
15319                                 MLX5_ASSERT(next_policy);
15320                                 if (next_fm->drop_cnt) {
15321                                         set_tag.id =
15322                                                 (enum modify_reg)
15323                                                 mlx5_flow_get_reg_id(dev,
15324                                                 MLX5_MTR_ID,
15325                                                 0,
15326                                                 (struct rte_flow_error *)error);
15327                                         set_tag.offset = (priv->mtr_reg_share ?
15328                                                 MLX5_MTR_COLOR_BITS : 0);
15329                                         set_tag.length = (priv->mtr_reg_share ?
15330                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15331                                                MLX5_REG_BITS);
15332                                         set_tag.data = next_mtr_idx;
15333                                         tag_action.type =
15334                                                 (enum rte_flow_action_type)
15335                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15336                                         tag_action.conf = &set_tag;
15337                                         if (flow_dv_convert_action_set_reg
15338                                                 (mhdr_res, &tag_action,
15339                                                 (struct rte_flow_error *)error))
15340                                                 return -rte_errno;
15341                                         action_flags |=
15342                                                 MLX5_FLOW_ACTION_SET_TAG;
15343                                 }
15344                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15345                                 act_cnt->next_mtr_id = next_fm->meter_id;
15346                                 act_cnt->next_sub_policy = NULL;
15347                                 mtr_policy->is_hierarchy = 1;
15348                                 mtr_policy->dev = next_policy->dev;
15349                                 action_flags |=
15350                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15351                                 break;
15352                         }
15353                         default:
15354                                 return -rte_mtr_error_set(error, ENOTSUP,
15355                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15356                                           NULL, "action type not supported");
15357                         }
15358                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15359                                 /* create modify action if needed. */
15360                                 dev_flow.dv.group = 1;
15361                                 if (flow_dv_modify_hdr_resource_register
15362                                         (dev, mhdr_res, &dev_flow, &flow_err))
15363                                         return -rte_mtr_error_set(error,
15364                                                 ENOTSUP,
15365                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15366                                                 NULL, "cannot register policy "
15367                                                 "set tag action");
15368                                 act_cnt->modify_hdr =
15369                                         dev_flow.handle->dvh.modify_hdr;
15370                         }
15371                 }
15372         }
15373         return 0;
15374 }
15375
15376 /**
15377  * Create policy action per domain, lock free,
15378  * (mutex should be acquired by caller).
15379  * Dispatcher for action type specific call.
15380  *
15381  * @param[in] dev
15382  *   Pointer to the Ethernet device structure.
15383  * @param[in] mtr_policy
15384  *   Meter policy struct.
15385  * @param[in] action
15386  *   Action specification used to create meter actions.
15387  * @param[out] error
15388  *   Perform verbose error reporting if not NULL. Initialized in case of
15389  *   error only.
15390  *
15391  * @return
15392  *   0 on success, otherwise negative errno value.
15393  */
15394 static int
15395 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15396                       struct mlx5_flow_meter_policy *mtr_policy,
15397                       const struct rte_flow_action *actions[RTE_COLORS],
15398                       struct rte_mtr_error *error)
15399 {
15400         int ret, i;
15401         uint16_t sub_policy_num;
15402
15403         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15404                 sub_policy_num = (mtr_policy->sub_policy_num >>
15405                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15406                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15407                 if (sub_policy_num) {
15408                         ret = __flow_dv_create_domain_policy_acts(dev,
15409                                 mtr_policy, actions,
15410                                 (enum mlx5_meter_domain)i, error);
15411                         if (ret)
15412                                 return ret;
15413                 }
15414         }
15415         return 0;
15416 }
15417
15418 /**
15419  * Query a DV flow rule for its statistics via DevX.
15420  *
15421  * @param[in] dev
15422  *   Pointer to Ethernet device.
15423  * @param[in] cnt_idx
15424  *   Index to the flow counter.
15425  * @param[out] data
15426  *   Data retrieved by the query.
15427  * @param[out] error
15428  *   Perform verbose error reporting if not NULL.
15429  *
15430  * @return
15431  *   0 on success, a negative errno value otherwise and rte_errno is set.
15432  */
15433 static int
15434 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15435                     struct rte_flow_error *error)
15436 {
15437         struct mlx5_priv *priv = dev->data->dev_private;
15438         struct rte_flow_query_count *qc = data;
15439
15440         if (!priv->config.devx)
15441                 return rte_flow_error_set(error, ENOTSUP,
15442                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15443                                           NULL,
15444                                           "counters are not supported");
15445         if (cnt_idx) {
15446                 uint64_t pkts, bytes;
15447                 struct mlx5_flow_counter *cnt;
15448                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15449
15450                 if (err)
15451                         return rte_flow_error_set(error, -err,
15452                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15453                                         NULL, "cannot read counters");
15454                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15455                 qc->hits_set = 1;
15456                 qc->bytes_set = 1;
15457                 qc->hits = pkts - cnt->hits;
15458                 qc->bytes = bytes - cnt->bytes;
15459                 if (qc->reset) {
15460                         cnt->hits = pkts;
15461                         cnt->bytes = bytes;
15462                 }
15463                 return 0;
15464         }
15465         return rte_flow_error_set(error, EINVAL,
15466                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15467                                   NULL,
15468                                   "counters are not available");
15469 }
15470
/**
 * Query an indirect (shared) action: AGE, COUNT or conntrack (CT).
 *
 * The action handle encodes the action type in its high bits and the
 * per-type object index in the low bits; both are unpacked here and
 * dispatched to the type-specific query path.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] handle
 *   Indirect action handle (type + index packed into a pointer-sized value).
 * @param[out] data
 *   Query output; its concrete type depends on the action type
 *   (rte_flow_query_age for AGE, rte_flow_query_count for COUNT,
 *   rte_flow_action_conntrack for CT).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_action_query(struct rte_eth_dev *dev,
		     const struct rte_flow_action_handle *handle, void *data,
		     struct rte_flow_error *error)
{
	struct mlx5_age_param *age_param;
	struct rte_flow_query_age *resp;
	/* The handle itself carries the packed index, not a real pointer. */
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;
	uint16_t owner;
	uint32_t dev_idx;

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
		resp = data;
		/* Atomic read: aging state is updated from another context. */
		resp->aged = __atomic_load_n(&age_param->state,
					      __ATOMIC_RELAXED) == AGE_TMOUT ?
								          1 : 0;
		/* Last-hit time is only meaningful while not yet aged out. */
		resp->sec_since_last_hit_valid = !resp->aged;
		if (resp->sec_since_last_hit_valid)
			resp->sec_since_last_hit = __atomic_load_n
			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		return flow_dv_query_count(dev, idx, data, error);
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		/* CT objects may be shared; reject queries from other ports. */
		owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
		if (owner != PORT_ID(priv))
			return rte_flow_error_set(error, EACCES,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object owned by another port");
		dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
		ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
		MLX5_ASSERT(ct);
		if (!ct->refcnt)
			return rte_flow_error_set(error, EFAULT,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object is inactive");
		/* Fill software-known fields before the hardware query. */
		((struct rte_flow_action_conntrack *)data)->peer_port =
							ct->peer;
		((struct rte_flow_action_conntrack *)data)->is_original_dir =
							ct->is_original;
		if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
			return rte_flow_error_set(error, EIO,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"Failed to query CT context");
		return 0;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "action type query not supported");
	}
}
15531
15532 /**
15533  * Query a flow rule AGE action for aging information.
15534  *
15535  * @param[in] dev
15536  *   Pointer to Ethernet device.
15537  * @param[in] flow
15538  *   Pointer to the sub flow.
15539  * @param[out] data
15540  *   data retrieved by the query.
15541  * @param[out] error
15542  *   Perform verbose error reporting if not NULL.
15543  *
15544  * @return
15545  *   0 on success, a negative errno value otherwise and rte_errno is set.
15546  */
15547 static int
15548 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15549                   void *data, struct rte_flow_error *error)
15550 {
15551         struct rte_flow_query_age *resp = data;
15552         struct mlx5_age_param *age_param;
15553
15554         if (flow->age) {
15555                 struct mlx5_aso_age_action *act =
15556                                      flow_aso_age_get_by_idx(dev, flow->age);
15557
15558                 age_param = &act->age_params;
15559         } else if (flow->counter) {
15560                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15561
15562                 if (!age_param || !age_param->timeout)
15563                         return rte_flow_error_set
15564                                         (error, EINVAL,
15565                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15566                                          NULL, "cannot read age data");
15567         } else {
15568                 return rte_flow_error_set(error, EINVAL,
15569                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15570                                           NULL, "age data not available");
15571         }
15572         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15573                                      AGE_TMOUT ? 1 : 0;
15574         resp->sec_since_last_hit_valid = !resp->aged;
15575         if (resp->sec_since_last_hit_valid)
15576                 resp->sec_since_last_hit = __atomic_load_n
15577                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15578         return 0;
15579 }
15580
15581 /**
15582  * Query a flow.
15583  *
15584  * @see rte_flow_query()
15585  * @see rte_flow_ops
15586  */
15587 static int
15588 flow_dv_query(struct rte_eth_dev *dev,
15589               struct rte_flow *flow __rte_unused,
15590               const struct rte_flow_action *actions __rte_unused,
15591               void *data __rte_unused,
15592               struct rte_flow_error *error __rte_unused)
15593 {
15594         int ret = -EINVAL;
15595
15596         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15597                 switch (actions->type) {
15598                 case RTE_FLOW_ACTION_TYPE_VOID:
15599                         break;
15600                 case RTE_FLOW_ACTION_TYPE_COUNT:
15601                         ret = flow_dv_query_count(dev, flow->counter, data,
15602                                                   error);
15603                         break;
15604                 case RTE_FLOW_ACTION_TYPE_AGE:
15605                         ret = flow_dv_query_age(dev, flow, data, error);
15606                         break;
15607                 default:
15608                         return rte_flow_error_set(error, ENOTSUP,
15609                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15610                                                   actions,
15611                                                   "action not supported");
15612                 }
15613         }
15614         return ret;
15615 }
15616
15617 /**
15618  * Destroy the meter table set.
15619  * Lock free, (mutex should be acquired by caller).
15620  *
15621  * @param[in] dev
15622  *   Pointer to Ethernet device.
15623  * @param[in] fm
15624  *   Meter information table.
15625  */
15626 static void
15627 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15628                         struct mlx5_flow_meter_info *fm)
15629 {
15630         struct mlx5_priv *priv = dev->data->dev_private;
15631         int i;
15632
15633         if (!fm || !priv->config.dv_flow_en)
15634                 return;
15635         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15636                 if (fm->drop_rule[i]) {
15637                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15638                         fm->drop_rule[i] = NULL;
15639                 }
15640         }
15641 }
15642
15643 static void
15644 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15645 {
15646         struct mlx5_priv *priv = dev->data->dev_private;
15647         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15648         struct mlx5_flow_tbl_data_entry *tbl;
15649         int i, j;
15650
15651         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15652                 if (mtrmng->def_rule[i]) {
15653                         claim_zero(mlx5_flow_os_destroy_flow
15654                                         (mtrmng->def_rule[i]));
15655                         mtrmng->def_rule[i] = NULL;
15656                 }
15657                 if (mtrmng->def_matcher[i]) {
15658                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
15659                                 struct mlx5_flow_tbl_data_entry, tbl);
15660                         mlx5_list_unregister(tbl->matchers,
15661                                              &mtrmng->def_matcher[i]->entry);
15662                         mtrmng->def_matcher[i] = NULL;
15663                 }
15664                 for (j = 0; j < MLX5_REG_BITS; j++) {
15665                         if (mtrmng->drop_matcher[i][j]) {
15666                                 tbl =
15667                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
15668                                              struct mlx5_flow_tbl_data_entry,
15669                                              tbl);
15670                                 mlx5_list_unregister(tbl->matchers,
15671                                             &mtrmng->drop_matcher[i][j]->entry);
15672                                 mtrmng->drop_matcher[i][j] = NULL;
15673                         }
15674                 }
15675                 if (mtrmng->drop_tbl[i]) {
15676                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15677                                 mtrmng->drop_tbl[i]);
15678                         mtrmng->drop_tbl[i] = NULL;
15679                 }
15680         }
15681 }
15682
15683 /* Number of meter flow actions, count and jump or count and drop. */
15684 #define METER_ACTIONS 2
15685
15686 static void
15687 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15688                               enum mlx5_meter_domain domain)
15689 {
15690         struct mlx5_priv *priv = dev->data->dev_private;
15691         struct mlx5_flow_meter_def_policy *def_policy =
15692                         priv->sh->mtrmng->def_policy[domain];
15693
15694         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15695         mlx5_free(def_policy);
15696         priv->sh->mtrmng->def_policy[domain] = NULL;
15697 }
15698
15699 /**
15700  * Destroy the default policy table set.
15701  *
15702  * @param[in] dev
15703  *   Pointer to Ethernet device.
15704  */
15705 static void
15706 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15707 {
15708         struct mlx5_priv *priv = dev->data->dev_private;
15709         int i;
15710
15711         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15712                 if (priv->sh->mtrmng->def_policy[i])
15713                         __flow_dv_destroy_domain_def_policy(dev,
15714                                         (enum mlx5_meter_domain)i);
15715         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15716 }
15717
15718 static int
15719 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
15720                         uint32_t color_reg_c_idx,
15721                         enum rte_color color, void *matcher_object,
15722                         int actions_n, void *actions,
15723                         bool match_src_port, const struct rte_flow_item *item,
15724                         void **rule, const struct rte_flow_attr *attr)
15725 {
15726         int ret;
15727         struct mlx5_flow_dv_match_params value = {
15728                 .size = sizeof(value.buf),
15729         };
15730         struct mlx5_flow_dv_match_params matcher = {
15731                 .size = sizeof(matcher.buf),
15732         };
15733         struct mlx5_priv *priv = dev->data->dev_private;
15734         uint8_t misc_mask;
15735
15736         if (match_src_port && (priv->representor || priv->master)) {
15737                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
15738                                                    value.buf, item, attr)) {
15739                         DRV_LOG(ERR,
15740                         "Failed to create meter policy flow with port.");
15741                         return -1;
15742                 }
15743         }
15744         flow_dv_match_meta_reg(matcher.buf, value.buf,
15745                                 (enum modify_reg)color_reg_c_idx,
15746                                 rte_col_2_mlx5_col(color),
15747                                 UINT32_MAX);
15748         misc_mask = flow_dv_matcher_enable(value.buf);
15749         __flow_dv_adjust_buf_size(&value.size, misc_mask);
15750         ret = mlx5_flow_os_create_flow(matcher_object,
15751                         (void *)&value, actions_n, actions, rule);
15752         if (ret) {
15753                 DRV_LOG(ERR, "Failed to create meter policy flow.");
15754                 return -1;
15755         }
15756         return 0;
15757 }
15758
/**
 * Register a matcher for one meter policy color rule in the sub-policy table.
 *
 * The matcher matches the color register (masked to the color bits) for
 * green/yellow priorities; the red priority matcher matches nothing extra.
 * An optional source port match is added for representor/master devices.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] color_reg_c_idx
 *   Index of the metadata register carrying the color.
 * @param[in] priority
 *   Matcher priority, also used as the color index.
 * @param[in] sub_policy
 *   Sub-policy holding the target table resource.
 * @param[in] attr
 *   Flow attributes used for the port item translation.
 * @param[in] match_src_port
 *   Whether to match on the source port.
 * @param[in] item
 *   Optional port item, NULL to use the device defaults.
 * @param[out] policy_matcher
 *   Registered (possibly shared) matcher on success.
 * @param[out] error
 *   Flow error for the list registration callback.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
			uint32_t color_reg_c_idx,
			uint16_t priority,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			const struct rte_flow_attr *attr,
			bool match_src_port,
			const struct rte_flow_item *item,
			struct mlx5_flow_dv_matcher **policy_matcher,
			struct rte_flow_error *error)
{
	struct mlx5_list_entry *entry;
	struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
		.tbl = tbl_rsc,
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	/* ctx.data carries the matcher template to the list create callback. */
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = &matcher,
	};
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Mask covering only the color bits of the register. */
	uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;

	if (match_src_port && (priv->representor || priv->master)) {
		if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
						   value.buf, item, attr)) {
			DRV_LOG(ERR,
			"Failed to register meter drop matcher with port.");
			return -1;
		}
	}
	tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
	/* Red priority matchers do not match on the color register. */
	if (priority < RTE_COLOR_RED)
		flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
			(enum modify_reg)color_reg_c_idx, 0, color_mask);
	/* Priority and CRC must be final before registration looks them up. */
	matcher.priority = priority;
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
					matcher.mask.size);
	entry = mlx5_list_register(tbl_data->matchers, &ctx);
	if (!entry) {
		DRV_LOG(ERR, "Failed to register meter drop matcher.");
		return -1;
	}
	*policy_matcher =
		container_of(entry, struct mlx5_flow_dv_matcher, entry);
	return 0;
}
15813
15814 /**
15815  * Create the policy rules per domain.
15816  *
15817  * @param[in] dev
15818  *   Pointer to Ethernet device.
15819  * @param[in] sub_policy
15820  *    Pointer to sub policy table..
15821  * @param[in] egress
15822  *   Direction of the table.
15823  * @param[in] transfer
15824  *   E-Switch or NIC flow.
15825  * @param[in] acts
15826  *   Pointer to policy action list per color.
15827  *
15828  * @return
15829  *   0 on success, -1 otherwise.
15830  */
15831 static int
15832 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
15833                 struct mlx5_flow_meter_sub_policy *sub_policy,
15834                 uint8_t egress, uint8_t transfer, bool match_src_port,
15835                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
15836 {
15837         struct mlx5_priv *priv = dev->data->dev_private;
15838         struct rte_flow_error flow_err;
15839         uint32_t color_reg_c_idx;
15840         struct rte_flow_attr attr = {
15841                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
15842                 .priority = 0,
15843                 .ingress = 0,
15844                 .egress = !!egress,
15845                 .transfer = !!transfer,
15846                 .reserved = 0,
15847         };
15848         int i;
15849         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
15850         struct mlx5_sub_policy_color_rule *color_rule;
15851
15852         if (ret < 0)
15853                 return -1;
15854         /* Create policy table with POLICY level. */
15855         if (!sub_policy->tbl_rsc)
15856                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
15857                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
15858                                 egress, transfer, false, NULL, 0, 0,
15859                                 sub_policy->idx, &flow_err);
15860         if (!sub_policy->tbl_rsc) {
15861                 DRV_LOG(ERR,
15862                         "Failed to create meter sub policy table.");
15863                 return -1;
15864         }
15865         /* Prepare matchers. */
15866         color_reg_c_idx = ret;
15867         for (i = 0; i < RTE_COLORS; i++) {
15868                 TAILQ_INIT(&sub_policy->color_rules[i]);
15869                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
15870                         continue;
15871                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
15872                                 sizeof(struct mlx5_sub_policy_color_rule),
15873                                 0, SOCKET_ID_ANY);
15874                 if (!color_rule) {
15875                         DRV_LOG(ERR, "No memory to create color rule.");
15876                         goto err_exit;
15877                 }
15878                 color_rule->src_port = priv->representor_id;
15879                 attr.priority = i;
15880                 /* Create matchers for Color. */
15881                 if (__flow_dv_create_policy_matcher(dev,
15882                                 color_reg_c_idx, i, sub_policy, &attr,
15883                                 (i != RTE_COLOR_RED ? match_src_port : false),
15884                                 NULL, &color_rule->matcher, &flow_err)) {
15885                         DRV_LOG(ERR, "Failed to create color matcher.");
15886                         goto err_exit;
15887                 }
15888                 /* Create flow, matching color. */
15889                 if (__flow_dv_create_policy_flow(dev,
15890                                 color_reg_c_idx, (enum rte_color)i,
15891                                 color_rule->matcher->matcher_object,
15892                                 acts[i].actions_n,
15893                                 acts[i].dv_actions,
15894                                 (i != RTE_COLOR_RED ? match_src_port : false),
15895                                 NULL, &color_rule->rule,
15896                                 &attr)) {
15897                         DRV_LOG(ERR, "Failed to create color rule.");
15898                         goto err_exit;
15899                 }
15900                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
15901                                   color_rule, next_port);
15902         }
15903         return 0;
15904 err_exit:
15905         if (color_rule) {
15906                 if (color_rule->rule)
15907                         mlx5_flow_os_destroy_flow(color_rule->rule);
15908                 if (color_rule->matcher) {
15909                         struct mlx5_flow_tbl_data_entry *tbl =
15910                                 container_of(color_rule->matcher->tbl,
15911                                                 typeof(*tbl), tbl);
15912                         mlx5_list_unregister(tbl->matchers,
15913                                                 &color_rule->matcher->entry);
15914                 }
15915                 mlx5_free(color_rule);
15916         }
15917         return -1;
15918 }
15919
/**
 * Build the per-color action lists for one sub-policy and create its rules.
 *
 * Green actions are assembled from the policy action counters (mark, modify
 * header, fate action); red only supports drop; yellow is skipped. For meter
 * hierarchies the next meter action is placed first for egress/transfer
 * ("mtr_first") and last otherwise.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Meter policy providing the per-color action counters.
 * @param[in] sub_policy
 *   Sub-policy whose table receives the rules.
 * @param[in] domain
 *   Meter domain (ingress/egress/transfer).
 *
 * @return
 *   0 on success, -1 otherwise (next-hierarchy meter detached on failure).
 */
static int
__flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	struct mlx5_flow_dv_tag_resource *tag;
	struct mlx5_flow_dv_port_id_action_resource *port_action;
	struct mlx5_hrxq *hrxq;
	struct mlx5_flow_meter_info *next_fm = NULL;
	struct mlx5_flow_meter_policy *next_policy;
	struct mlx5_flow_meter_sub_policy *next_sub_policy;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct rte_flow_error error;
	uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	/* The meter action must precede others for TX/FDB representors. */
	bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
	bool match_src_port = false;
	int i;

	for (i = 0; i < RTE_COLORS; i++) {
		acts[i].actions_n = 0;
		if (i == RTE_COLOR_YELLOW)
			continue;
		if (i == RTE_COLOR_RED) {
			/* Only support drop on red. */
			acts[i].dv_actions[0] =
			mtr_policy->dr_drop_action[domain];
			acts[i].actions_n = 1;
			continue;
		}
		if (mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
			struct rte_flow_attr attr = {
				.transfer = transfer
			};

			/* Resolve and attach the next meter in the hierarchy. */
			next_fm = mlx5_flow_meter_find(priv,
					mtr_policy->act_cnt[i].next_mtr_id,
					NULL);
			if (!next_fm) {
				DRV_LOG(ERR,
					"Failed to get next hierarchy meter.");
				goto err_exit;
			}
			if (mlx5_flow_meter_attach(priv, next_fm,
						   &attr, &error)) {
				DRV_LOG(ERR, "%s", error.message);
				/* Not attached: do not detach on error path. */
				next_fm = NULL;
				goto err_exit;
			}
			/* Meter action must be the first for TX. */
			if (mtr_first) {
				acts[i].dv_actions[acts[i].actions_n] =
					next_fm->meter_action;
				acts[i].actions_n++;
			}
		}
		/* Optional MARK action resolved from the tag indexed pool. */
		if (mtr_policy->act_cnt[i].rix_mark) {
			tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
					mtr_policy->act_cnt[i].rix_mark);
			if (!tag) {
				DRV_LOG(ERR, "Failed to find "
				"mark action for policy.");
				goto err_exit;
			}
			acts[i].dv_actions[acts[i].actions_n] =
						tag->action;
			acts[i].actions_n++;
		}
		/* Optional modify-header action. */
		if (mtr_policy->act_cnt[i].modify_hdr) {
			acts[i].dv_actions[acts[i].actions_n] =
			mtr_policy->act_cnt[i].modify_hdr->action;
			acts[i].actions_n++;
		}
		if (mtr_policy->act_cnt[i].fate_action) {
			switch (mtr_policy->act_cnt[i].fate_action) {
			case MLX5_FLOW_FATE_PORT_ID:
				port_action = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
				mtr_policy->act_cnt[i].rix_port_id_action);
				if (!port_action) {
					DRV_LOG(ERR, "Failed to find "
						"port action for policy.");
					goto err_exit;
				}
				acts[i].dv_actions[acts[i].actions_n] =
				port_action->action;
				acts[i].actions_n++;
				mtr_policy->dev = dev;
				/* Port fate requires source port matching. */
				match_src_port = true;
				break;
			case MLX5_FLOW_FATE_DROP:
			case MLX5_FLOW_FATE_JUMP:
				acts[i].dv_actions[acts[i].actions_n] =
				mtr_policy->act_cnt[i].dr_jump_action[domain];
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_SHARED_RSS:
			case MLX5_FLOW_FATE_QUEUE:
				hrxq = mlx5_ipool_get
				(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				sub_policy->rix_hrxq[i]);
				if (!hrxq) {
					DRV_LOG(ERR, "Failed to find "
						"queue action for policy.");
					goto err_exit;
				}
				acts[i].dv_actions[acts[i].actions_n] =
				hrxq->action;
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_MTR:
				if (!next_fm) {
					DRV_LOG(ERR,
						"No next hierarchy meter.");
					goto err_exit;
				}
				/* Non-TX domains place the meter action last. */
				if (!mtr_first) {
					acts[i].dv_actions[acts[i].actions_n] =
							next_fm->meter_action;
					acts[i].actions_n++;
				}
				/* Jump to the next meter's sub-policy table. */
				if (mtr_policy->act_cnt[i].next_sub_policy) {
					next_sub_policy =
					mtr_policy->act_cnt[i].next_sub_policy;
				} else {
					next_policy =
						mlx5_flow_meter_policy_find(dev,
						next_fm->policy_id, NULL);
					MLX5_ASSERT(next_policy);
					next_sub_policy =
					next_policy->sub_policys[domain][0];
				}
				tbl_data =
					container_of(next_sub_policy->tbl_rsc,
					struct mlx5_flow_tbl_data_entry, tbl);
				acts[i].dv_actions[acts[i].actions_n++] =
							tbl_data->jump.action;
				if (mtr_policy->act_cnt[i].modify_hdr)
					match_src_port = !!transfer;
				break;
			default:
				/*Queue action do nothing*/
				break;
			}
		}
	}
	if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
				egress, transfer, match_src_port, acts)) {
		DRV_LOG(ERR,
		"Failed to create policy rules per domain.");
		goto err_exit;
	}
	return 0;
err_exit:
	/* Undo the hierarchy meter attachment done above, if any. */
	if (next_fm)
		mlx5_flow_meter_detach(priv, next_fm);
	return -1;
}
16081
16082 /**
16083  * Create the policy rules.
16084  *
16085  * @param[in] dev
16086  *   Pointer to Ethernet device.
16087  * @param[in,out] mtr_policy
16088  *   Pointer to meter policy table.
16089  *
16090  * @return
16091  *   0 on success, -1 otherwise.
16092  */
16093 static int
16094 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16095                              struct mlx5_flow_meter_policy *mtr_policy)
16096 {
16097         int i;
16098         uint16_t sub_policy_num;
16099
16100         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16101                 sub_policy_num = (mtr_policy->sub_policy_num >>
16102                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16103                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16104                 if (!sub_policy_num)
16105                         continue;
16106                 /* Prepare actions list and create policy rules. */
16107                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16108                         mtr_policy->sub_policys[i][0], i)) {
16109                         DRV_LOG(ERR,
16110                         "Failed to create policy action list per domain.");
16111                         return -1;
16112                 }
16113         }
16114         return 0;
16115 }
16116
16117 static int
16118 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16119 {
16120         struct mlx5_priv *priv = dev->data->dev_private;
16121         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16122         struct mlx5_flow_meter_def_policy *def_policy;
16123         struct mlx5_flow_tbl_resource *jump_tbl;
16124         struct mlx5_flow_tbl_data_entry *tbl_data;
16125         uint8_t egress, transfer;
16126         struct rte_flow_error error;
16127         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16128         int ret;
16129
16130         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16131         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16132         def_policy = mtrmng->def_policy[domain];
16133         if (!def_policy) {
16134                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16135                         sizeof(struct mlx5_flow_meter_def_policy),
16136                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16137                 if (!def_policy) {
16138                         DRV_LOG(ERR, "Failed to alloc "
16139                                         "default policy table.");
16140                         goto def_policy_error;
16141                 }
16142                 mtrmng->def_policy[domain] = def_policy;
16143                 /* Create the meter suffix table with SUFFIX level. */
16144                 jump_tbl = flow_dv_tbl_resource_get(dev,
16145                                 MLX5_FLOW_TABLE_LEVEL_METER,
16146                                 egress, transfer, false, NULL, 0,
16147                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16148                 if (!jump_tbl) {
16149                         DRV_LOG(ERR,
16150                                 "Failed to create meter suffix table.");
16151                         goto def_policy_error;
16152                 }
16153                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16154                 tbl_data = container_of(jump_tbl,
16155                                 struct mlx5_flow_tbl_data_entry, tbl);
16156                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16157                                                 tbl_data->jump.action;
16158                 acts[RTE_COLOR_GREEN].dv_actions[0] =
16159                                                 tbl_data->jump.action;
16160                 acts[RTE_COLOR_GREEN].actions_n = 1;
16161                 /* Create jump action to the drop table. */
16162                 if (!mtrmng->drop_tbl[domain]) {
16163                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16164                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16165                                 egress, transfer, false, NULL, 0,
16166                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
16167                         if (!mtrmng->drop_tbl[domain]) {
16168                                 DRV_LOG(ERR, "Failed to create "
16169                                 "meter drop table for default policy.");
16170                                 goto def_policy_error;
16171                         }
16172                 }
16173                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16174                                 struct mlx5_flow_tbl_data_entry, tbl);
16175                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16176                                                 tbl_data->jump.action;
16177                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16178                 acts[RTE_COLOR_RED].actions_n = 1;
16179                 /* Create default policy rules. */
16180                 ret = __flow_dv_create_domain_policy_rules(dev,
16181                                         &def_policy->sub_policy,
16182                                         egress, transfer, false, acts);
16183                 if (ret) {
16184                         DRV_LOG(ERR, "Failed to create "
16185                                 "default policy rules.");
16186                                 goto def_policy_error;
16187                 }
16188         }
16189         return 0;
16190 def_policy_error:
16191         __flow_dv_destroy_domain_def_policy(dev,
16192                         (enum mlx5_meter_domain)domain);
16193         return -1;
16194 }
16195
16196 /**
16197  * Create the default policy table set.
16198  *
16199  * @param[in] dev
16200  *   Pointer to Ethernet device.
16201  * @return
16202  *   0 on success, -1 otherwise.
16203  */
16204 static int
16205 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16206 {
16207         struct mlx5_priv *priv = dev->data->dev_private;
16208         int i;
16209
16210         /* Non-termination policy table. */
16211         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16212                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16213                         continue;
16214                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16215                         DRV_LOG(ERR,
16216                         "Failed to create default policy");
16217                         return -1;
16218                 }
16219         }
16220         return 0;
16221 }
16222
16223 /**
16224  * Create the needed meter tables.
16225  * Lock free, (mutex should be acquired by caller).
16226  *
16227  * @param[in] dev
16228  *   Pointer to Ethernet device.
16229  * @param[in] fm
16230  *   Meter information table.
16231  * @param[in] mtr_idx
16232  *   Meter index.
16233  * @param[in] domain_bitmap
16234  *   Domain bitmap.
16235  * @return
16236  *   0 on success, -1 otherwise.
16237  */
16238 static int
16239 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16240                         struct mlx5_flow_meter_info *fm,
16241                         uint32_t mtr_idx,
16242                         uint8_t domain_bitmap)
16243 {
16244         struct mlx5_priv *priv = dev->data->dev_private;
16245         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16246         struct rte_flow_error error;
16247         struct mlx5_flow_tbl_data_entry *tbl_data;
16248         uint8_t egress, transfer;
16249         void *actions[METER_ACTIONS];
16250         int domain, ret, i;
16251         struct mlx5_flow_counter *cnt;
16252         struct mlx5_flow_dv_match_params value = {
16253                 .size = sizeof(value.buf),
16254         };
16255         struct mlx5_flow_dv_match_params matcher_para = {
16256                 .size = sizeof(matcher_para.buf),
16257         };
16258         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16259                                                      0, &error);
16260         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16261         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16262         struct mlx5_list_entry *entry;
16263         struct mlx5_flow_dv_matcher matcher = {
16264                 .mask = {
16265                         .size = sizeof(matcher.mask.buf),
16266                 },
16267         };
16268         struct mlx5_flow_dv_matcher *drop_matcher;
16269         struct mlx5_flow_cb_ctx ctx = {
16270                 .error = &error,
16271                 .data = &matcher,
16272         };
16273         uint8_t misc_mask;
16274
16275         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16276                 rte_errno = ENOTSUP;
16277                 return -1;
16278         }
16279         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16280                 if (!(domain_bitmap & (1 << domain)) ||
16281                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16282                         continue;
16283                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16284                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16285                 /* Create the drop table with METER DROP level. */
16286                 if (!mtrmng->drop_tbl[domain]) {
16287                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16288                                         MLX5_FLOW_TABLE_LEVEL_METER,
16289                                         egress, transfer, false, NULL, 0,
16290                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16291                         if (!mtrmng->drop_tbl[domain]) {
16292                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16293                                 goto policy_error;
16294                         }
16295                 }
16296                 /* Create default matcher in drop table. */
16297                 matcher.tbl = mtrmng->drop_tbl[domain],
16298                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16299                                 struct mlx5_flow_tbl_data_entry, tbl);
16300                 if (!mtrmng->def_matcher[domain]) {
16301                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16302                                        (enum modify_reg)mtr_id_reg_c,
16303                                        0, 0);
16304                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16305                         matcher.crc = rte_raw_cksum
16306                                         ((const void *)matcher.mask.buf,
16307                                         matcher.mask.size);
16308                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16309                         if (!entry) {
16310                                 DRV_LOG(ERR, "Failed to register meter "
16311                                 "drop default matcher.");
16312                                 goto policy_error;
16313                         }
16314                         mtrmng->def_matcher[domain] = container_of(entry,
16315                         struct mlx5_flow_dv_matcher, entry);
16316                 }
16317                 /* Create default rule in drop table. */
16318                 if (!mtrmng->def_rule[domain]) {
16319                         i = 0;
16320                         actions[i++] = priv->sh->dr_drop_action;
16321                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16322                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16323                         misc_mask = flow_dv_matcher_enable(value.buf);
16324                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16325                         ret = mlx5_flow_os_create_flow
16326                                 (mtrmng->def_matcher[domain]->matcher_object,
16327                                 (void *)&value, i, actions,
16328                                 &mtrmng->def_rule[domain]);
16329                         if (ret) {
16330                                 DRV_LOG(ERR, "Failed to create meter "
16331                                 "default drop rule for drop table.");
16332                                 goto policy_error;
16333                         }
16334                 }
16335                 if (!fm->drop_cnt)
16336                         continue;
16337                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16338                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16339                         /* Create matchers for Drop. */
16340                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16341                                         (enum modify_reg)mtr_id_reg_c, 0,
16342                                         (mtr_id_mask << mtr_id_offset));
16343                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16344                         matcher.crc = rte_raw_cksum
16345                                         ((const void *)matcher.mask.buf,
16346                                         matcher.mask.size);
16347                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16348                         if (!entry) {
16349                                 DRV_LOG(ERR,
16350                                 "Failed to register meter drop matcher.");
16351                                 goto policy_error;
16352                         }
16353                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16354                                 container_of(entry, struct mlx5_flow_dv_matcher,
16355                                              entry);
16356                 }
16357                 drop_matcher =
16358                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16359                 /* Create drop rule, matching meter_id only. */
16360                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16361                                 (enum modify_reg)mtr_id_reg_c,
16362                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16363                 i = 0;
16364                 cnt = flow_dv_counter_get_by_idx(dev,
16365                                         fm->drop_cnt, NULL);
16366                 actions[i++] = cnt->action;
16367                 actions[i++] = priv->sh->dr_drop_action;
16368                 misc_mask = flow_dv_matcher_enable(value.buf);
16369                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16370                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16371                                                (void *)&value, i, actions,
16372                                                &fm->drop_rule[domain]);
16373                 if (ret) {
16374                         DRV_LOG(ERR, "Failed to create meter "
16375                                 "drop rule for drop table.");
16376                                 goto policy_error;
16377                 }
16378         }
16379         return 0;
16380 policy_error:
16381         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16382                 if (fm->drop_rule[i]) {
16383                         claim_zero(mlx5_flow_os_destroy_flow
16384                                 (fm->drop_rule[i]));
16385                         fm->drop_rule[i] = NULL;
16386                 }
16387         }
16388         return -1;
16389 }
16390
/**
 * Get or create the RSS sub policy table of a meter policy.
 *
 * Under the policy spinlock: resolve one hrxq per color from @p rss_desc,
 * return an existing sub policy whose per-color hrxq set matches (releasing
 * the extra hrxq references), otherwise allocate a new sub policy (or reuse
 * the first dummy one) and create its policy rules.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Pointer to meter policy table.
 * @param[in] rss_desc
 *   Per-color RSS descriptors; NULL entries are skipped.
 * @param[in] next_sub_policy
 *   Next level's sub policy, used when @p mtr_policy is hierarchical.
 * @param[out] is_reuse
 *   Set to true when an already existing sub policy is returned.
 * @return
 *   Pointer to the sub policy on success, NULL otherwise.
 */
static struct mlx5_flow_meter_sub_policy *
__flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_policy *mtr_policy,
		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
		struct mlx5_flow_meter_sub_policy *next_sub_policy,
		bool *is_reuse)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
	uint32_t sub_policy_idx = 0;
	uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
	uint32_t i, j;
	struct mlx5_hrxq *hrxq;
	struct mlx5_flow_handle dh;
	struct mlx5_meter_policy_action_container *act_cnt;
	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
	uint16_t sub_policy_num;

	rte_spinlock_lock(&mtr_policy->sl);
	/* Take one hrxq reference per requested color. */
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (!rss_desc[i])
			continue;
		hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
		if (!hrxq_idx[i]) {
			rte_spinlock_unlock(&mtr_policy->sl);
			return NULL;
		}
	}
	/* Per-domain sub policy count is bit-packed in sub_policy_num. */
	sub_policy_num = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
	for (i = 0; i < sub_policy_num;
		i++) {
		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
			if (rss_desc[j] &&
				hrxq_idx[j] !=
			mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
				break;
		}
		if (j >= MLX5_MTR_RTE_COLORS) {
			/*
			 * Found the sub policy table with
			 * the same queue per color
			 */
			rte_spinlock_unlock(&mtr_policy->sl);
			/* Drop the references taken above; existing entry keeps its own. */
			for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
				mlx5_hrxq_release(dev, hrxq_idx[j]);
			*is_reuse = true;
			return mtr_policy->sub_policys[domain][i];
		}
	}
	/* Create sub policy. */
	if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
		/* Reuse the first dummy sub_policy*/
		sub_policy = mtr_policy->sub_policys[domain][0];
		sub_policy_idx = sub_policy->idx;
	} else {
		sub_policy = mlx5_ipool_zmalloc
				(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
				&sub_policy_idx);
		if (!sub_policy ||
			sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
			for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
				mlx5_hrxq_release(dev, hrxq_idx[i]);
			goto rss_sub_policy_error;
		}
		sub_policy->idx = sub_policy_idx;
		sub_policy->main_policy = mtr_policy;
	}
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (!rss_desc[i])
			continue;
		sub_policy->rix_hrxq[i] = hrxq_idx[i];
		if (mtr_policy->is_hierarchy) {
			/* Hierarchy: record next level and drop local hrxq ref. */
			act_cnt = &mtr_policy->act_cnt[i];
			act_cnt->next_sub_policy = next_sub_policy;
			mlx5_hrxq_release(dev, hrxq_idx[i]);
		} else {
			/*
			 * Overwrite the last action from
			 * RSS action to Queue action.
			 */
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				hrxq_idx[i]);
			if (!hrxq) {
				DRV_LOG(ERR, "Failed to create policy hrxq");
				goto rss_sub_policy_error;
			}
			act_cnt = &mtr_policy->act_cnt[i];
			if (act_cnt->rix_mark || act_cnt->modify_hdr) {
				/* Fake handle so Rx queue flags get updated. */
				memset(&dh, 0, sizeof(struct mlx5_flow_handle));
				if (act_cnt->rix_mark)
					dh.mark = 1;
				dh.fate_action = MLX5_FLOW_FATE_QUEUE;
				dh.rix_hrxq = hrxq_idx[i];
				flow_drv_rxq_flags_set(dev, &dh);
			}
		}
	}
	if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
		sub_policy, domain)) {
		DRV_LOG(ERR, "Failed to create policy "
			"rules per domain.");
		goto rss_sub_policy_error;
	}
	if (sub_policy != mtr_policy->sub_policys[domain][0]) {
		i = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
		/*
		 * NOTE(review): the slot is written before the bound check below;
		 * presumably i can never exceed MLX5_MTR_RSS_MAX_SUB_POLICY here
		 * because the ipool index was already range-checked — confirm.
		 */
		mtr_policy->sub_policys[domain][i] = sub_policy;
		i++;
		if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
			goto rss_sub_policy_error;
		mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
		mtr_policy->sub_policy_num |=
			(i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
	}
	rte_spinlock_unlock(&mtr_policy->sl);
	*is_reuse = false;
	return sub_policy;
rss_sub_policy_error:
	if (sub_policy) {
		__flow_dv_destroy_sub_policy_rules(dev, sub_policy);
		if (sub_policy != mtr_policy->sub_policys[domain][0]) {
			i = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
			mtr_policy->sub_policys[domain][i] = NULL;
			mlx5_ipool_free
			(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
					sub_policy->idx);
		}
	}
	rte_spinlock_unlock(&mtr_policy->sl);
	return NULL;
}
16529
16530 /**
16531  * Find the policy table for prefix table with RSS.
16532  *
16533  * @param[in] dev
16534  *   Pointer to Ethernet device.
16535  * @param[in] mtr_policy
16536  *   Pointer to meter policy table.
16537  * @param[in] rss_desc
16538  *   Pointer to rss_desc
16539  * @return
16540  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
16541  */
16542 static struct mlx5_flow_meter_sub_policy *
16543 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
16544                 struct mlx5_flow_meter_policy *mtr_policy,
16545                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
16546 {
16547         struct mlx5_priv *priv = dev->data->dev_private;
16548         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16549         struct mlx5_flow_meter_info *next_fm;
16550         struct mlx5_flow_meter_policy *next_policy;
16551         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
16552         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
16553         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
16554         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16555         bool reuse_sub_policy;
16556         uint32_t i = 0;
16557         uint32_t j = 0;
16558
16559         while (true) {
16560                 /* Iterate hierarchy to get all policies in this hierarchy. */
16561                 policies[i++] = mtr_policy;
16562                 if (!mtr_policy->is_hierarchy)
16563                         break;
16564                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
16565                         DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
16566                         return NULL;
16567                 }
16568                 next_fm = mlx5_flow_meter_find(priv,
16569                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16570                 if (!next_fm) {
16571                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
16572                         return NULL;
16573                 }
16574                 next_policy =
16575                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
16576                                                     NULL);
16577                 MLX5_ASSERT(next_policy);
16578                 mtr_policy = next_policy;
16579         }
16580         while (i) {
16581                 /**
16582                  * From last policy to the first one in hierarchy,
16583                  * create/get the sub policy for each of them.
16584                  */
16585                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
16586                                                         policies[--i],
16587                                                         rss_desc,
16588                                                         next_sub_policy,
16589                                                         &reuse_sub_policy);
16590                 if (!sub_policy) {
16591                         DRV_LOG(ERR, "Failed to get the sub policy.");
16592                         goto err_exit;
16593                 }
16594                 if (!reuse_sub_policy)
16595                         sub_policies[j++] = sub_policy;
16596                 next_sub_policy = sub_policy;
16597         }
16598         return sub_policy;
16599 err_exit:
16600         while (j) {
16601                 uint16_t sub_policy_num;
16602
16603                 sub_policy = sub_policies[--j];
16604                 mtr_policy = sub_policy->main_policy;
16605                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16606                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16607                         sub_policy_num = (mtr_policy->sub_policy_num >>
16608                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16609                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
16610                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
16611                                                                         NULL;
16612                         sub_policy_num--;
16613                         mtr_policy->sub_policy_num &=
16614                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16615                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i));
16616                         mtr_policy->sub_policy_num |=
16617                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16618                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i);
16619                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16620                                         sub_policy->idx);
16621                 }
16622         }
16623         return NULL;
16624 }
16625
16626 /**
16627  * Create the sub policy tag rule for all meters in hierarchy.
16628  *
16629  * @param[in] dev
16630  *   Pointer to Ethernet device.
16631  * @param[in] fm
16632  *   Meter information table.
16633  * @param[in] src_port
16634  *   The src port this extra rule should use.
16635  * @param[in] item
16636  *   The src port match item.
16637  * @param[out] error
16638  *   Perform verbose error reporting if not NULL.
16639  * @return
16640  *   0 on success, a negative errno value otherwise and rte_errno is set.
16641  */
static int
flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
				struct mlx5_flow_meter_info *fm,
				int32_t src_port,
				const struct rte_flow_item *item,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_policy *mtr_policy;
	struct mlx5_flow_meter_sub_policy *sub_policy;
	struct mlx5_flow_meter_info *next_fm = NULL;
	struct mlx5_flow_meter_policy *next_policy;
	struct mlx5_flow_meter_sub_policy *next_sub_policy;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	/* Only assigned inside the loop; err_exit is reachable solely after it is set. */
	struct mlx5_sub_policy_color_rule *color_rule;
	struct mlx5_meter_policy_acts acts;
	uint32_t color_reg_c_idx;
	/*
	 * NOTE(review): src_port is int32_t but the sentinel is UINT16_MAX;
	 * presumably callers pass UINT16_MAX for "no source port" — confirm.
	 */
	bool mtr_first = (src_port != UINT16_MAX) ? true : false;
	struct rte_flow_attr attr = {
		.group = MLX5_FLOW_TABLE_LEVEL_POLICY,
		.priority = 0,
		.ingress = 0,
		.egress = 0,
		.transfer = 1,
		.reserved = 0,
	};
	uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
	int i;

	mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
	MLX5_ASSERT(mtr_policy);
	/* Nothing to do for a leaf policy. */
	if (!mtr_policy->is_hierarchy)
		return 0;
	next_fm = mlx5_flow_meter_find(priv,
			mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
	if (!next_fm) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				"Failed to find next meter in hierarchy.");
	}
	if (!next_fm->drop_cnt)
		goto exit;
	color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
	sub_policy = mtr_policy->sub_policys[domain][0];
	for (i = 0; i < RTE_COLORS; i++) {
		bool rule_exist = false;
		struct mlx5_meter_policy_action_container *act_cnt;

		/* Only the GREEN color rule is created here. */
		if (i >= RTE_COLOR_YELLOW)
			break;
		/* Skip if a rule for this source port already exists. */
		TAILQ_FOREACH(color_rule,
			      &sub_policy->color_rules[i], next_port)
			if (color_rule->src_port == src_port) {
				rule_exist = true;
				break;
			}
		if (rule_exist)
			continue;
		color_rule = mlx5_malloc(MLX5_MEM_ZERO,
				sizeof(struct mlx5_sub_policy_color_rule),
				0, SOCKET_ID_ANY);
		if (!color_rule)
			return rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "No memory to create tag color rule.");
		color_rule->src_port = src_port;
		attr.priority = i;
		next_policy = mlx5_flow_meter_policy_find(dev,
						next_fm->policy_id, NULL);
		MLX5_ASSERT(next_policy);
		next_sub_policy = next_policy->sub_policys[domain][0];
		tbl_data = container_of(next_sub_policy->tbl_rsc,
					struct mlx5_flow_tbl_data_entry, tbl);
		act_cnt = &mtr_policy->act_cnt[i];
		/* Order meter vs. modify-header action by source-port presence. */
		if (mtr_first) {
			acts.dv_actions[0] = next_fm->meter_action;
			acts.dv_actions[1] = act_cnt->modify_hdr->action;
		} else {
			acts.dv_actions[0] = act_cnt->modify_hdr->action;
			acts.dv_actions[1] = next_fm->meter_action;
		}
		acts.dv_actions[2] = tbl_data->jump.action;
		acts.actions_n = 3;
		if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
			/* Not attached: clear so err_exit does not detach it. */
			next_fm = NULL;
			goto err_exit;
		}
		if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
					i, sub_policy, &attr, true, item,
					&color_rule->matcher, error)) {
			rte_flow_error_set(error, errno,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to create hierarchy meter matcher.");
			goto err_exit;
		}
		if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
					(enum rte_color)i,
					color_rule->matcher->matcher_object,
					acts.actions_n, acts.dv_actions,
					true, item,
					&color_rule->rule, &attr)) {
			rte_flow_error_set(error, errno,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to create hierarchy meter rule.");
			goto err_exit;
		}
		TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
				  color_rule, next_port);
	}
exit:
	/**
	 * Recursive call to iterate all meters in hierarchy and
	 * create needed rules.
	 */
	return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
						src_port, item, error);
err_exit:
	/* Undo the partially built color rule (not yet in the tailq). */
	if (color_rule) {
		if (color_rule->rule)
			mlx5_flow_os_destroy_flow(color_rule->rule);
		if (color_rule->matcher) {
			struct mlx5_flow_tbl_data_entry *tbl =
				container_of(color_rule->matcher->tbl,
						typeof(*tbl), tbl);
			mlx5_list_unregister(tbl->matchers,
						&color_rule->matcher->entry);
		}
		mlx5_free(color_rule);
	}
	if (next_fm)
		mlx5_flow_meter_detach(priv, next_fm);
	return -rte_errno;
}
16775
16776 /**
16777  * Destroy the sub policy table with RX queue.
16778  *
16779  * @param[in] dev
16780  *   Pointer to Ethernet device.
16781  * @param[in] mtr_policy
16782  *   Pointer to meter policy table.
16783  */
16784 static void
16785 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
16786                 struct mlx5_flow_meter_policy *mtr_policy)
16787 {
16788         struct mlx5_priv *priv = dev->data->dev_private;
16789         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16790         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16791         uint32_t i, j;
16792         uint16_t sub_policy_num, new_policy_num;
16793
16794         rte_spinlock_lock(&mtr_policy->sl);
16795         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16796                 switch (mtr_policy->act_cnt[i].fate_action) {
16797                 case MLX5_FLOW_FATE_SHARED_RSS:
16798                         sub_policy_num = (mtr_policy->sub_policy_num >>
16799                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16800                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16801                         new_policy_num = sub_policy_num;
16802                         for (j = 0; j < sub_policy_num; j++) {
16803                                 sub_policy =
16804                                         mtr_policy->sub_policys[domain][j];
16805                                 if (sub_policy) {
16806                                         __flow_dv_destroy_sub_policy_rules(dev,
16807                                                 sub_policy);
16808                                 if (sub_policy !=
16809                                         mtr_policy->sub_policys[domain][0]) {
16810                                         mtr_policy->sub_policys[domain][j] =
16811                                                                 NULL;
16812                                         mlx5_ipool_free
16813                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16814                                                 sub_policy->idx);
16815                                                 new_policy_num--;
16816                                         }
16817                                 }
16818                         }
16819                         if (new_policy_num != sub_policy_num) {
16820                                 mtr_policy->sub_policy_num &=
16821                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16822                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16823                                 mtr_policy->sub_policy_num |=
16824                                 (new_policy_num &
16825                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16826                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16827                         }
16828                         break;
16829                 case MLX5_FLOW_FATE_QUEUE:
16830                         sub_policy = mtr_policy->sub_policys[domain][0];
16831                         __flow_dv_destroy_sub_policy_rules(dev,
16832                                                 sub_policy);
16833                         break;
16834                 default:
16835                         /*Other actions without queue and do nothing*/
16836                         break;
16837                 }
16838         }
16839         rte_spinlock_unlock(&mtr_policy->sl);
16840 }
16841
16842 /**
16843  * Validate the batch counter support in root table.
16844  *
16845  * Create a simple flow with invalid counter and drop action on root table to
16846  * validate if batch counter with offset on root table is supported or not.
16847  *
16848  * @param[in] dev
16849  *   Pointer to rte_eth_dev structure.
16850  *
16851  * @return
16852  *   0 on success, a negative errno value otherwise and rte_errno is set.
16853  */
16854 int
16855 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
16856 {
16857         struct mlx5_priv *priv = dev->data->dev_private;
16858         struct mlx5_dev_ctx_shared *sh = priv->sh;
16859         struct mlx5_flow_dv_match_params mask = {
16860                 .size = sizeof(mask.buf),
16861         };
16862         struct mlx5_flow_dv_match_params value = {
16863                 .size = sizeof(value.buf),
16864         };
16865         struct mlx5dv_flow_matcher_attr dv_attr = {
16866                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
16867                 .priority = 0,
16868                 .match_criteria_enable = 0,
16869                 .match_mask = (void *)&mask,
16870         };
16871         void *actions[2] = { 0 };
16872         struct mlx5_flow_tbl_resource *tbl = NULL;
16873         struct mlx5_devx_obj *dcs = NULL;
16874         void *matcher = NULL;
16875         void *flow = NULL;
16876         int ret = -1;
16877
16878         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
16879                                         0, 0, 0, NULL);
16880         if (!tbl)
16881                 goto err;
16882         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
16883         if (!dcs)
16884                 goto err;
16885         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
16886                                                     &actions[0]);
16887         if (ret)
16888                 goto err;
16889         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
16890         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
16891         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
16892                                                &matcher);
16893         if (ret)
16894                 goto err;
16895         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
16896         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
16897                                        actions, &flow);
16898 err:
16899         /*
16900          * If batch counter with offset is not supported, the driver will not
16901          * validate the invalid offset value, flow create should success.
16902          * In this case, it means batch counter is not supported in root table.
16903          *
16904          * Otherwise, if flow create is failed, counter offset is supported.
16905          */
16906         if (flow) {
16907                 DRV_LOG(INFO, "Batch counter is not supported in root "
16908                               "table. Switch to fallback mode.");
16909                 rte_errno = ENOTSUP;
16910                 ret = -rte_errno;
16911                 claim_zero(mlx5_flow_os_destroy_flow(flow));
16912         } else {
16913                 /* Check matcher to make sure validate fail at flow create. */
16914                 if (!matcher || (matcher && errno != EINVAL))
16915                         DRV_LOG(ERR, "Unexpected error in counter offset "
16916                                      "support detection");
16917                 ret = 0;
16918         }
16919         if (actions[0])
16920                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
16921         if (matcher)
16922                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
16923         if (tbl)
16924                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
16925         if (dcs)
16926                 claim_zero(mlx5_devx_cmd_destroy(dcs));
16927         return ret;
16928 }
16929
16930 /**
16931  * Query a devx counter.
16932  *
16933  * @param[in] dev
16934  *   Pointer to the Ethernet device structure.
16935  * @param[in] cnt
16936  *   Index to the flow counter.
16937  * @param[in] clear
16938  *   Set to clear the counter statistics.
16939  * @param[out] pkts
16940  *   The statistics value of packets.
16941  * @param[out] bytes
16942  *   The statistics value of bytes.
16943  *
16944  * @return
16945  *   0 on success, otherwise return -1.
16946  */
16947 static int
16948 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
16949                       uint64_t *pkts, uint64_t *bytes)
16950 {
16951         struct mlx5_priv *priv = dev->data->dev_private;
16952         struct mlx5_flow_counter *cnt;
16953         uint64_t inn_pkts, inn_bytes;
16954         int ret;
16955
16956         if (!priv->config.devx)
16957                 return -1;
16958
16959         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
16960         if (ret)
16961                 return -1;
16962         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
16963         *pkts = inn_pkts - cnt->hits;
16964         *bytes = inn_bytes - cnt->bytes;
16965         if (clear) {
16966                 cnt->hits = inn_pkts;
16967                 cnt->bytes = inn_bytes;
16968         }
16969         return 0;
16970 }
16971
16972 /**
16973  * Get aged-out flows.
16974  *
16975  * @param[in] dev
16976  *   Pointer to the Ethernet device structure.
16977  * @param[in] context
16978  *   The address of an array of pointers to the aged-out flows contexts.
16979  * @param[in] nb_contexts
16980  *   The length of context array pointers.
16981  * @param[out] error
16982  *   Perform verbose error reporting if not NULL. Initialized in case of
16983  *   error only.
16984  *
16985  * @return
16986  *   how many contexts get in success, otherwise negative errno value.
16987  *   if nb_contexts is 0, return the amount of all aged contexts.
16988  *   if nb_contexts is not 0 , return the amount of aged flows reported
16989  *   in the context array.
16990  * @note: only stub for now
16991  */
16992 static int
16993 flow_get_aged_flows(struct rte_eth_dev *dev,
16994                     void **context,
16995                     uint32_t nb_contexts,
16996                     struct rte_flow_error *error)
16997 {
16998         struct mlx5_priv *priv = dev->data->dev_private;
16999         struct mlx5_age_info *age_info;
17000         struct mlx5_age_param *age_param;
17001         struct mlx5_flow_counter *counter;
17002         struct mlx5_aso_age_action *act;
17003         int nb_flows = 0;
17004
17005         if (nb_contexts && !context)
17006                 return rte_flow_error_set(error, EINVAL,
17007                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17008                                           NULL, "empty context");
17009         age_info = GET_PORT_AGE_INFO(priv);
17010         rte_spinlock_lock(&age_info->aged_sl);
17011         LIST_FOREACH(act, &age_info->aged_aso, next) {
17012                 nb_flows++;
17013                 if (nb_contexts) {
17014                         context[nb_flows - 1] =
17015                                                 act->age_params.context;
17016                         if (!(--nb_contexts))
17017                                 break;
17018                 }
17019         }
17020         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17021                 nb_flows++;
17022                 if (nb_contexts) {
17023                         age_param = MLX5_CNT_TO_AGE(counter);
17024                         context[nb_flows - 1] = age_param->context;
17025                         if (!(--nb_contexts))
17026                                 break;
17027                 }
17028         }
17029         rte_spinlock_unlock(&age_info->aged_sl);
17030         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17031         return nb_flows;
17032 }
17033
17034 /*
17035  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17036  */
17037 static uint32_t
17038 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17039 {
17040         return flow_dv_counter_alloc(dev, 0);
17041 }
17042
17043 /**
17044  * Validate indirect action.
17045  * Dispatcher for action type specific validation.
17046  *
17047  * @param[in] dev
17048  *   Pointer to the Ethernet device structure.
17049  * @param[in] conf
17050  *   Indirect action configuration.
17051  * @param[in] action
17052  *   The indirect action object to validate.
17053  * @param[out] error
17054  *   Perform verbose error reporting if not NULL. Initialized in case of
17055  *   error only.
17056  *
17057  * @return
17058  *   0 on success, otherwise negative errno value.
17059  */
17060 static int
17061 flow_dv_action_validate(struct rte_eth_dev *dev,
17062                         const struct rte_flow_indir_action_conf *conf,
17063                         const struct rte_flow_action *action,
17064                         struct rte_flow_error *err)
17065 {
17066         struct mlx5_priv *priv = dev->data->dev_private;
17067
17068         RTE_SET_USED(conf);
17069         switch (action->type) {
17070         case RTE_FLOW_ACTION_TYPE_RSS:
17071                 /*
17072                  * priv->obj_ops is set according to driver capabilities.
17073                  * When DevX capabilities are
17074                  * sufficient, it is set to devx_obj_ops.
17075                  * Otherwise, it is set to ibv_obj_ops.
17076                  * ibv_obj_ops doesn't support ind_table_modify operation.
17077                  * In this case the indirect RSS action can't be used.
17078                  */
17079                 if (priv->obj_ops.ind_table_modify == NULL)
17080                         return rte_flow_error_set
17081                                         (err, ENOTSUP,
17082                                          RTE_FLOW_ERROR_TYPE_ACTION,
17083                                          NULL,
17084                                          "Indirect RSS action not supported");
17085                 return mlx5_validate_action_rss(dev, action, err);
17086         case RTE_FLOW_ACTION_TYPE_AGE:
17087                 if (!priv->sh->aso_age_mng)
17088                         return rte_flow_error_set(err, ENOTSUP,
17089                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17090                                                 NULL,
17091                                                 "Indirect age action not supported");
17092                 return flow_dv_validate_action_age(0, action, dev, err);
17093         case RTE_FLOW_ACTION_TYPE_COUNT:
17094                 /*
17095                  * There are two mechanisms to share the action count.
17096                  * The old mechanism uses the shared field to share, while the
17097                  * new mechanism uses the indirect action API.
17098                  * This validation comes to make sure that the two mechanisms
17099                  * are not combined.
17100                  */
17101                 if (is_shared_action_count(action))
17102                         return rte_flow_error_set(err, ENOTSUP,
17103                                                   RTE_FLOW_ERROR_TYPE_ACTION,
17104                                                   NULL,
17105                                                   "Mix shared and indirect counter is not supported");
17106                 return flow_dv_validate_action_count(dev, true, 0, err);
17107         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17108                 if (!priv->sh->ct_aso_en)
17109                         return rte_flow_error_set(err, ENOTSUP,
17110                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17111                                         "ASO CT is not supported");
17112                 return mlx5_validate_action_ct(dev, action->conf, err);
17113         default:
17114                 return rte_flow_error_set(err, ENOTSUP,
17115                                           RTE_FLOW_ERROR_TYPE_ACTION,
17116                                           NULL,
17117                                           "action type not supported");
17118         }
17119 }
17120
17121 /**
17122  * Validate the meter hierarchy chain for meter policy.
17123  *
17124  * @param[in] dev
17125  *   Pointer to the Ethernet device structure.
17126  * @param[in] meter_id
17127  *   Meter id.
17128  * @param[in] action_flags
17129  *   Holds the actions detected until now.
17130  * @param[out] is_rss
17131  *   Is RSS or not.
17132  * @param[out] hierarchy_domain
17133  *   The domain bitmap for hierarchy policy.
17134  * @param[out] error
17135  *   Perform verbose error reporting if not NULL. Initialized in case of
17136  *   error only.
17137  *
17138  * @return
17139  *   0 on success, otherwise negative errno value with error set.
17140  */
17141 static int
17142 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17143                                   uint32_t meter_id,
17144                                   uint64_t action_flags,
17145                                   bool *is_rss,
17146                                   uint8_t *hierarchy_domain,
17147                                   struct rte_mtr_error *error)
17148 {
17149         struct mlx5_priv *priv = dev->data->dev_private;
17150         struct mlx5_flow_meter_info *fm;
17151         struct mlx5_flow_meter_policy *policy;
17152         uint8_t cnt = 1;
17153
17154         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17155                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17156                 return -rte_mtr_error_set(error, EINVAL,
17157                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17158                                         NULL,
17159                                         "Multiple fate actions not supported.");
17160         while (true) {
17161                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17162                 if (!fm)
17163                         return -rte_mtr_error_set(error, EINVAL,
17164                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17165                                         "Meter not found in meter hierarchy.");
17166                 if (fm->def_policy)
17167                         return -rte_mtr_error_set(error, EINVAL,
17168                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17169                         "Non termination meter not supported in hierarchy.");
17170                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17171                 MLX5_ASSERT(policy);
17172                 if (!policy->is_hierarchy) {
17173                         if (policy->transfer)
17174                                 *hierarchy_domain |=
17175                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17176                         if (policy->ingress)
17177                                 *hierarchy_domain |=
17178                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17179                         if (policy->egress)
17180                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17181                         *is_rss = policy->is_rss;
17182                         break;
17183                 }
17184                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17185                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17186                         return -rte_mtr_error_set(error, EINVAL,
17187                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17188                                         "Exceed max hierarchy meter number.");
17189         }
17190         return 0;
17191 }
17192
17193 /**
17194  * Validate meter policy actions.
17195  * Dispatcher for action type specific validation.
17196  *
17197  * @param[in] dev
17198  *   Pointer to the Ethernet device structure.
17199  * @param[in] action
17200  *   The meter policy action object to validate.
17201  * @param[in] attr
17202  *   Attributes of flow to determine steering domain.
17203  * @param[out] error
17204  *   Perform verbose error reporting if not NULL. Initialized in case of
17205  *   error only.
17206  *
17207  * @return
17208  *   0 on success, otherwise negative errno value.
17209  */
17210 static int
17211 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17212                         const struct rte_flow_action *actions[RTE_COLORS],
17213                         struct rte_flow_attr *attr,
17214                         bool *is_rss,
17215                         uint8_t *domain_bitmap,
17216                         bool *is_def_policy,
17217                         struct rte_mtr_error *error)
17218 {
17219         struct mlx5_priv *priv = dev->data->dev_private;
17220         struct mlx5_dev_config *dev_conf = &priv->config;
17221         const struct rte_flow_action *act;
17222         uint64_t action_flags = 0;
17223         int actions_n;
17224         int i, ret;
17225         struct rte_flow_error flow_err;
17226         uint8_t domain_color[RTE_COLORS] = {0};
17227         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17228         uint8_t hierarchy_domain = 0;
17229         const struct rte_flow_action_meter *mtr;
17230
17231         if (!priv->config.dv_esw_en)
17232                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17233         *domain_bitmap = def_domain;
17234         if (actions[RTE_COLOR_YELLOW] &&
17235                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
17236                 return -rte_mtr_error_set(error, ENOTSUP,
17237                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17238                                 NULL,
17239                                 "Yellow color does not support any action.");
17240         if (actions[RTE_COLOR_YELLOW] &&
17241                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_DROP)
17242                 return -rte_mtr_error_set(error, ENOTSUP,
17243                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17244                                 NULL, "Red color only supports drop action.");
17245         /*
17246          * Check default policy actions:
17247          * Green/Yellow: no action, Red: drop action
17248          */
17249         if ((!actions[RTE_COLOR_GREEN] ||
17250                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
17251                 *is_def_policy = true;
17252                 return 0;
17253         }
17254         flow_err.message = NULL;
17255         for (i = 0; i < RTE_COLORS; i++) {
17256                 act = actions[i];
17257                 for (action_flags = 0, actions_n = 0;
17258                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
17259                         act++) {
17260                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17261                                 return -rte_mtr_error_set(error, ENOTSUP,
17262                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17263                                           NULL, "too many actions");
17264                         switch (act->type) {
17265                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17266                                 if (!priv->config.dv_esw_en)
17267                                         return -rte_mtr_error_set(error,
17268                                         ENOTSUP,
17269                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17270                                         NULL, "PORT action validate check"
17271                                         " fail for ESW disable");
17272                                 ret = flow_dv_validate_action_port_id(dev,
17273                                                 action_flags,
17274                                                 act, attr, &flow_err);
17275                                 if (ret)
17276                                         return -rte_mtr_error_set(error,
17277                                         ENOTSUP,
17278                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17279                                         NULL, flow_err.message ?
17280                                         flow_err.message :
17281                                         "PORT action validate check fail");
17282                                 ++actions_n;
17283                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
17284                                 break;
17285                         case RTE_FLOW_ACTION_TYPE_MARK:
17286                                 ret = flow_dv_validate_action_mark(dev, act,
17287                                                            action_flags,
17288                                                            attr, &flow_err);
17289                                 if (ret < 0)
17290                                         return -rte_mtr_error_set(error,
17291                                         ENOTSUP,
17292                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17293                                         NULL, flow_err.message ?
17294                                         flow_err.message :
17295                                         "Mark action validate check fail");
17296                                 if (dev_conf->dv_xmeta_en !=
17297                                         MLX5_XMETA_MODE_LEGACY)
17298                                         return -rte_mtr_error_set(error,
17299                                         ENOTSUP,
17300                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17301                                         NULL, "Extend MARK action is "
17302                                         "not supported. Please try use "
17303                                         "default policy for meter.");
17304                                 action_flags |= MLX5_FLOW_ACTION_MARK;
17305                                 ++actions_n;
17306                                 break;
17307                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17308                                 ret = flow_dv_validate_action_set_tag(dev,
17309                                                         act, action_flags,
17310                                                         attr, &flow_err);
17311                                 if (ret)
17312                                         return -rte_mtr_error_set(error,
17313                                         ENOTSUP,
17314                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17315                                         NULL, flow_err.message ?
17316                                         flow_err.message :
17317                                         "Set tag action validate check fail");
17318                                 /*
17319                                  * Count all modify-header actions
17320                                  * as one action.
17321                                  */
17322                                 if (!(action_flags &
17323                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
17324                                         ++actions_n;
17325                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
17326                                 break;
17327                         case RTE_FLOW_ACTION_TYPE_DROP:
17328                                 ret = mlx5_flow_validate_action_drop
17329                                         (action_flags,
17330                                         attr, &flow_err);
17331                                 if (ret < 0)
17332                                         return -rte_mtr_error_set(error,
17333                                         ENOTSUP,
17334                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17335                                         NULL, flow_err.message ?
17336                                         flow_err.message :
17337                                         "Drop action validate check fail");
17338                                 action_flags |= MLX5_FLOW_ACTION_DROP;
17339                                 ++actions_n;
17340                                 break;
17341                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17342                                 /*
17343                                  * Check whether extensive
17344                                  * metadata feature is engaged.
17345                                  */
17346                                 if (dev_conf->dv_flow_en &&
17347                                         (dev_conf->dv_xmeta_en !=
17348                                         MLX5_XMETA_MODE_LEGACY) &&
17349                                         mlx5_flow_ext_mreg_supported(dev))
17350                                         return -rte_mtr_error_set(error,
17351                                           ENOTSUP,
17352                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17353                                           NULL, "Queue action with meta "
17354                                           "is not supported. Please try use "
17355                                           "default policy for meter.");
17356                                 ret = mlx5_flow_validate_action_queue(act,
17357                                                         action_flags, dev,
17358                                                         attr, &flow_err);
17359                                 if (ret < 0)
17360                                         return -rte_mtr_error_set(error,
17361                                           ENOTSUP,
17362                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17363                                           NULL, flow_err.message ?
17364                                           flow_err.message :
17365                                           "Queue action validate check fail");
17366                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
17367                                 ++actions_n;
17368                                 break;
17369                         case RTE_FLOW_ACTION_TYPE_RSS:
17370                                 if (dev_conf->dv_flow_en &&
17371                                         (dev_conf->dv_xmeta_en !=
17372                                         MLX5_XMETA_MODE_LEGACY) &&
17373                                         mlx5_flow_ext_mreg_supported(dev))
17374                                         return -rte_mtr_error_set(error,
17375                                           ENOTSUP,
17376                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17377                                           NULL, "RSS action with meta "
17378                                           "is not supported. Please try use "
17379                                           "default policy for meter.");
17380                                 ret = mlx5_validate_action_rss(dev, act,
17381                                                 &flow_err);
17382                                 if (ret < 0)
17383                                         return -rte_mtr_error_set(error,
17384                                           ENOTSUP,
17385                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17386                                           NULL, flow_err.message ?
17387                                           flow_err.message :
17388                                           "RSS action validate check fail");
17389                                 action_flags |= MLX5_FLOW_ACTION_RSS;
17390                                 ++actions_n;
17391                                 *is_rss = true;
17392                                 break;
17393                         case RTE_FLOW_ACTION_TYPE_JUMP:
17394                                 ret = flow_dv_validate_action_jump(dev,
17395                                         NULL, act, action_flags,
17396                                         attr, true, &flow_err);
17397                                 if (ret)
17398                                         return -rte_mtr_error_set(error,
17399                                           ENOTSUP,
17400                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17401                                           NULL, flow_err.message ?
17402                                           flow_err.message :
17403                                           "Jump action validate check fail");
17404                                 ++actions_n;
17405                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
17406                                 break;
17407                         case RTE_FLOW_ACTION_TYPE_METER:
17408                                 if (i != RTE_COLOR_GREEN)
17409                                         return -rte_mtr_error_set(error,
17410                                                 ENOTSUP,
17411                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17412                                                 NULL, flow_err.message ?
17413                                                 flow_err.message :
17414                                   "Meter hierarchy only supports GREEN color.");
17415                                 mtr = act->conf;
17416                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
17417                                                         mtr->mtr_id,
17418                                                         action_flags,
17419                                                         is_rss,
17420                                                         &hierarchy_domain,
17421                                                         error);
17422                                 if (ret)
17423                                         return ret;
17424                                 ++actions_n;
17425                                 action_flags |=
17426                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
17427                                 break;
17428                         default:
17429                                 return -rte_mtr_error_set(error, ENOTSUP,
17430                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17431                                         NULL,
17432                                         "Doesn't support optional action");
17433                         }
17434                 }
17435                 /* Yellow is not supported, just skip. */
17436                 if (i == RTE_COLOR_YELLOW)
17437                         continue;
17438                 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
17439                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
17440                 else if ((action_flags &
17441                         (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
17442                         (action_flags & MLX5_FLOW_ACTION_MARK))
17443                         /*
17444                          * Only support MLX5_XMETA_MODE_LEGACY
17445                          * so MARK action only in ingress domain.
17446                          */
17447                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
17448                 else if (action_flags &
17449                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
17450                         domain_color[i] = hierarchy_domain;
17451                 else
17452                         domain_color[i] = def_domain;
17453                 /*
17454                  * Validate the drop action mutual exclusion
17455                  * with other actions. Drop action is mutually-exclusive
17456                  * with any other action, except for Count action.
17457                  */
17458                 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
17459                         (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
17460                         return -rte_mtr_error_set(error, ENOTSUP,
17461                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17462                                 NULL, "Drop action is mutually-exclusive "
17463                                 "with any other action");
17464                 }
17465                 /* Eswitch has few restrictions on using items and actions */
17466                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
17467                         if (!mlx5_flow_ext_mreg_supported(dev) &&
17468                                 action_flags & MLX5_FLOW_ACTION_MARK)
17469                                 return -rte_mtr_error_set(error, ENOTSUP,
17470                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17471                                         NULL, "unsupported action MARK");
17472                         if (action_flags & MLX5_FLOW_ACTION_QUEUE)
17473                                 return -rte_mtr_error_set(error, ENOTSUP,
17474                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17475                                         NULL, "unsupported action QUEUE");
17476                         if (action_flags & MLX5_FLOW_ACTION_RSS)
17477                                 return -rte_mtr_error_set(error, ENOTSUP,
17478                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17479                                         NULL, "unsupported action RSS");
17480                         if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17481                                 return -rte_mtr_error_set(error, ENOTSUP,
17482                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17483                                         NULL, "no fate action is found");
17484                 } else {
17485                         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
17486                                 (domain_color[i] &
17487                                 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
17488                                 if ((domain_color[i] &
17489                                         MLX5_MTR_DOMAIN_EGRESS_BIT))
17490                                         domain_color[i] =
17491                                         MLX5_MTR_DOMAIN_EGRESS_BIT;
17492                                 else
17493                                         return -rte_mtr_error_set(error,
17494                                         ENOTSUP,
17495                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17496                                         NULL, "no fate action is found");
17497                         }
17498                 }
17499                 if (domain_color[i] != def_domain)
17500                         *domain_bitmap = domain_color[i];
17501         }
17502         return 0;
17503 }
17504
17505 static int
17506 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
17507 {
17508         struct mlx5_priv *priv = dev->data->dev_private;
17509         int ret = 0;
17510
17511         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
17512                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
17513                                                 flags);
17514                 if (ret != 0)
17515                         return ret;
17516         }
17517         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
17518                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
17519                 if (ret != 0)
17520                         return ret;
17521         }
17522         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
17523                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
17524                 if (ret != 0)
17525                         return ret;
17526         }
17527         return 0;
17528 }
17529
/*
 * DV (Direct Verbs/Direct Rules) flow engine driver operations table.
 * Binds the generic mlx5 flow layer callbacks to the flow_dv_*
 * implementations in this file (validation, translation, meter policy,
 * counters, aging and indirect-action handling).
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	.create_mtr_tbls = flow_dv_create_mtr_tbls,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
	.destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
	.create_meter = flow_dv_mtr_alloc,
	.free_meter = flow_dv_aso_mtr_release_to_pool,
	.validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
	.create_mtr_acts = flow_dv_create_mtr_policy_acts,
	.destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
	.create_policy_rules = flow_dv_create_policy_rules,
	.destroy_policy_rules = flow_dv_destroy_policy_rules,
	.create_def_policy = flow_dv_create_def_policy,
	.destroy_def_policy = flow_dv_destroy_def_policy,
	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
	.meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
	.destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
	.sync_domain = flow_dv_sync_domain,
};
17564
17565 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
17566