net/mlx5: validate meter action in policy
dpdk.git: drivers/net/mlx5/mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24 #include <rte_mtr.h>
25 #include <rte_mtr_driver.h>
26 #include <rte_tailq.h>
27
28 #include <mlx5_glue.h>
29 #include <mlx5_devx_cmds.h>
30 #include <mlx5_prm.h>
31 #include <mlx5_malloc.h>
32
33 #include "mlx5_defs.h"
34 #include "mlx5.h"
35 #include "mlx5_common_os.h"
36 #include "mlx5_flow.h"
37 #include "mlx5_flow_os.h"
38 #include "mlx5_rx.h"
39 #include "mlx5_tx.h"
40 #include "rte_pmd_mlx5.h"
41
42 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
43
44 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
45 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
46 #endif
47
48 #ifndef HAVE_MLX5DV_DR_ESWITCH
49 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
50 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
51 #endif
52 #endif
53
54 #ifndef HAVE_MLX5DV_DR
55 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
56 #endif
57
58 /* VLAN header definitions */
59 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
60 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
61 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
62 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
63 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
64
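/*
 * Flow attributes deduced from the flow items. The "attr" overlay allows
 * all the flags to be cleared in a single assignment (attr->attr = 0).
 */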
65 union flow_dv_attr {
66         struct {
67                 uint32_t valid:1;
68                 uint32_t ipv4:1;
69                 uint32_t ipv6:1;
70                 uint32_t tcp:1;
71                 uint32_t udp:1;
72                 uint32_t reserved:27;
73         };
74         uint32_t attr;
75 };
76
77 static int
78 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
79                              struct mlx5_flow_tbl_resource *tbl);
80
81 static int
82 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
83                                      uint32_t encap_decap_idx);
84
85 static int
86 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
87                                         uint32_t port_id);
88 static void
89 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
90
91 static int
92 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
93                                   uint32_t rix_jump);
94
95 /**
96  * Initialize flow attributes structure according to flow items' types.
97  *
98  * flow_dv_validate() rejects multiple L3/L4 layer combinations except in
99  * tunnel mode. In tunnel mode, the items to be modified are the outermost ones.
100  *
101  * @param[in] item
102  *   Pointer to item specification.
103  * @param[out] attr
104  *   Pointer to flow attributes structure.
105  * @param[in] dev_flow
106  *   Pointer to the sub flow.
107  * @param[in] tunnel_decap
108  *   Whether action is after tunnel decapsulation.
109  */
110 static void
111 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
112                   struct mlx5_flow *dev_flow, bool tunnel_decap)
113 {
114         uint64_t layers = dev_flow->handle->layers;
115
116         /*
117          * If layers is already initialized, this dev_flow is the suffix
118          * flow and the layer flags were set by the prefix flow. Use the
119          * layer flags from the prefix flow, since the suffix flow may not
120          * carry the user-defined items after the flow is split.
121          */
122         if (layers) {
123                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
124                         attr->ipv4 = 1;
125                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
126                         attr->ipv6 = 1;
127                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
128                         attr->tcp = 1;
129                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
130                         attr->udp = 1;
131                 attr->valid = 1;
132                 return;
133         }
134         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
135                 uint8_t next_protocol = 0xff;
136                 switch (item->type) {
137                 case RTE_FLOW_ITEM_TYPE_GRE:
138                 case RTE_FLOW_ITEM_TYPE_NVGRE:
139                 case RTE_FLOW_ITEM_TYPE_VXLAN:
140                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
141                 case RTE_FLOW_ITEM_TYPE_GENEVE:
142                 case RTE_FLOW_ITEM_TYPE_MPLS:
143                         if (tunnel_decap)
144                                 attr->attr = 0;
145                         break;
146                 case RTE_FLOW_ITEM_TYPE_IPV4:
147                         if (!attr->ipv6)
148                                 attr->ipv4 = 1;
149                         if (item->mask != NULL &&
150                             ((const struct rte_flow_item_ipv4 *)
151                             item->mask)->hdr.next_proto_id)
152                                 next_protocol =
153                                     ((const struct rte_flow_item_ipv4 *)
154                                       (item->spec))->hdr.next_proto_id &
155                                     ((const struct rte_flow_item_ipv4 *)
156                                       (item->mask))->hdr.next_proto_id;
157                         if ((next_protocol == IPPROTO_IPIP ||
158                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
159                                 attr->attr = 0;
160                         break;
161                 case RTE_FLOW_ITEM_TYPE_IPV6:
162                         if (!attr->ipv4)
163                                 attr->ipv6 = 1;
164                         if (item->mask != NULL &&
165                             ((const struct rte_flow_item_ipv6 *)
166                             item->mask)->hdr.proto)
167                                 next_protocol =
168                                     ((const struct rte_flow_item_ipv6 *)
169                                       (item->spec))->hdr.proto &
170                                     ((const struct rte_flow_item_ipv6 *)
171                                       (item->mask))->hdr.proto;
172                         if ((next_protocol == IPPROTO_IPIP ||
173                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
174                                 attr->attr = 0;
175                         break;
176                 case RTE_FLOW_ITEM_TYPE_UDP:
177                         if (!attr->tcp)
178                                 attr->udp = 1;
179                         break;
180                 case RTE_FLOW_ITEM_TYPE_TCP:
181                         if (!attr->udp)
182                                 attr->tcp = 1;
183                         break;
184                 default:
185                         break;
186                 }
187         }
188         attr->valid = 1;
189 }
190
191 /**
192  * Convert rte_color to mlx5 color.
193  *
194  * @param[in] rcol
195  *   rte_color to convert.
196  *
197  * @return
198  *   mlx5 color.
199  */
200 static int
201 rte_col_2_mlx5_col(enum rte_color rcol)
202 {
203         switch (rcol) {
204         case RTE_COLOR_GREEN:
205                 return MLX5_FLOW_COLOR_GREEN;
206         case RTE_COLOR_YELLOW:
207                 return MLX5_FLOW_COLOR_YELLOW;
208         case RTE_COLOR_RED:
209                 return MLX5_FLOW_COLOR_RED;
210         default:
211                 break;
212         }
213         return MLX5_FLOW_COLOR_UNDEFINED;
214 }
215
216 struct field_modify_info {
217         uint32_t size; /* Size of field in protocol header, in bytes. */
218         uint32_t offset; /* Offset of field in protocol header, in bytes. */
219         enum mlx5_modification_field id;
220 };
221
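/*
 * The modify_* tables below map protocol header fields (size and byte
 * offset within the header) to mlx5 modification field ids; a zero-sized
 * entry terminates each table.
 */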
222 struct field_modify_info modify_eth[] = {
223         {4,  0, MLX5_MODI_OUT_DMAC_47_16},
224         {2,  4, MLX5_MODI_OUT_DMAC_15_0},
225         {4,  6, MLX5_MODI_OUT_SMAC_47_16},
226         {2, 10, MLX5_MODI_OUT_SMAC_15_0},
227         {0, 0, 0},
228 };
229
230 struct field_modify_info modify_vlan_out_first_vid[] = {
231         /* Size is in bits here, unlike the byte sizes used in the other tables. */
232         {12, 0, MLX5_MODI_OUT_FIRST_VID},
233         {0, 0, 0},
234 };
235
236 struct field_modify_info modify_ipv4[] = {
237         {1,  1, MLX5_MODI_OUT_IP_DSCP},
238         {1,  8, MLX5_MODI_OUT_IPV4_TTL},
239         {4, 12, MLX5_MODI_OUT_SIPV4},
240         {4, 16, MLX5_MODI_OUT_DIPV4},
241         {0, 0, 0},
242 };
243
244 struct field_modify_info modify_ipv6[] = {
245         {1,  0, MLX5_MODI_OUT_IP_DSCP},
246         {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
247         {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
248         {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
249         {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
250         {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
251         {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
252         {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
253         {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
254         {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
255         {0, 0, 0},
256 };
257
258 struct field_modify_info modify_udp[] = {
259         {2, 0, MLX5_MODI_OUT_UDP_SPORT},
260         {2, 2, MLX5_MODI_OUT_UDP_DPORT},
261         {0, 0, 0},
262 };
263
264 struct field_modify_info modify_tcp[] = {
265         {2, 0, MLX5_MODI_OUT_TCP_SPORT},
266         {2, 2, MLX5_MODI_OUT_TCP_DPORT},
267         {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
268         {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
269         {0, 0, 0},
270 };
271
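/* Return the first tunnel item in the pattern, or NULL if none is found.
 * An IPv4/IPv6 item immediately followed by another IP item indicates an
 * IP-in-IP tunnel.
 */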
272 static const struct rte_flow_item *
273 mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
274 {
275         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
276                 switch (item->type) {
277                 default:
278                         break;
279                 case RTE_FLOW_ITEM_TYPE_VXLAN:
280                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
281                 case RTE_FLOW_ITEM_TYPE_GRE:
282                 case RTE_FLOW_ITEM_TYPE_MPLS:
283                 case RTE_FLOW_ITEM_TYPE_NVGRE:
284                 case RTE_FLOW_ITEM_TYPE_GENEVE:
285                         return item;
286                 case RTE_FLOW_ITEM_TYPE_IPV4:
287                 case RTE_FLOW_ITEM_TYPE_IPV6:
288                         if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
289                             item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
290                                 return item;
291                         break;
292                 }
293         }
294         return NULL;
295 }
296
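/* Set the IPIP or IPv6 encapsulation layer flag and the tunnel indication
 * when the IP next protocol designates an IP-in-IP tunnel.
 */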
297 static void
298 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
299                           uint8_t next_protocol, uint64_t *item_flags,
300                           int *tunnel)
301 {
302         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
303                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
304         if (next_protocol == IPPROTO_IPIP) {
305                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
306                 *tunnel = 1;
307         }
308         if (next_protocol == IPPROTO_IPV6) {
309                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
310                 *tunnel = 1;
311         }
312 }
313
314 /* Update VLAN's VID/PCP based on input rte_flow_action.
315  *
316  * @param[in] action
317  *   Pointer to struct rte_flow_action.
318  * @param[out] vlan
319  *   Pointer to struct rte_vlan_hdr.
320  */
321 static void
322 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
323                          struct rte_vlan_hdr *vlan)
324 {
325         uint16_t vlan_tci;
326         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
327                 vlan_tci =
328                     ((const struct rte_flow_action_of_set_vlan_pcp *)
329                                                action->conf)->vlan_pcp;
330                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
331                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
332                 vlan->vlan_tci |= vlan_tci;
333         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
334                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
335                 vlan->vlan_tci |= rte_be_to_cpu_16
336                     (((const struct rte_flow_action_of_set_vlan_vid *)
337                                              action->conf)->vlan_vid);
338         }
339 }
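/*
 * Illustrative example (values are hypothetical): with vlan_pcp = 5 and a
 * current vlan_tci of 0x0123, the PCP branch above computes
 * (5 << 13) | (0x0123 & ~0xe000) = 0xa123.
 */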
340
341 /**
342  * Fetch a 1-, 2-, 3- or 4-byte field from the byte array
343  * and return it as an unsigned integer in host-endian format.
344  *
345  * @param[in] data
346  *   Pointer to data array.
347  * @param[in] size
348  *   Size of field to extract.
349  *
350  * @return
351  *   converted field in host endian format.
352  */
353 static inline uint32_t
354 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
355 {
356         uint32_t ret;
357
358         switch (size) {
359         case 1:
360                 ret = *data;
361                 break;
362         case 2:
363                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
364                 break;
365         case 3:
366                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
367                 ret = (ret << 8) | *(data + sizeof(uint16_t));
368                 break;
369         case 4:
370                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
371                 break;
372         default:
373                 MLX5_ASSERT(false);
374                 ret = 0;
375                 break;
376         }
377         return ret;
378 }
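/*
 * A minimal usage sketch (the sample data is hypothetical): given
 * buf[] = { 0x12, 0x34, 0x56, 0x78 },
 *   flow_dv_fetch_field(buf, 2) returns 0x1234,
 *   flow_dv_fetch_field(buf, 3) returns 0x123456,
 *   flow_dv_fetch_field(buf, 4) returns 0x12345678,
 * regardless of the host endianness.
 */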
379
380 /**
381  * Convert modify-header action to DV specification.
382  *
383  * The data length of each action is determined by the provided field
384  * description and the item mask; the data bit offset and width of each
385  * action are deduced from the item mask.
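 *
 * For example (illustrative values): a 4-byte field whose mask is
 * 0x00ffff00 yields off_b = 8 and size_b = 16, i.e. the generated
 * command modifies bits 8..23 of the field.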
386  *
387  * @param[in] item
388  *   Pointer to item specification.
389  * @param[in] field
390  *   Pointer to field modification information.
391  *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
392  *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
393  *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
394  * @param[in] dcopy
395  *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
396  *   A negative offset value sets the same offset as the source offset.
397  *   The size field is ignored; the size is taken from the source field.
398  * @param[in,out] resource
399  *   Pointer to the modify-header resource.
400  * @param[in] type
401  *   Type of modification.
402  * @param[out] error
403  *   Pointer to the error structure.
404  *
405  * @return
406  *   0 on success, a negative errno value otherwise and rte_errno is set.
407  */
408 static int
409 flow_dv_convert_modify_action(struct rte_flow_item *item,
410                               struct field_modify_info *field,
411                               struct field_modify_info *dcopy,
412                               struct mlx5_flow_dv_modify_hdr_resource *resource,
413                               uint32_t type, struct rte_flow_error *error)
414 {
415         uint32_t i = resource->actions_num;
416         struct mlx5_modification_cmd *actions = resource->actions;
417         uint32_t carry_b = 0;
418
419         /*
420          * The item and mask are provided in big-endian format.
421          * The fields should be presented in big-endian format as well.
422          * The mask must always be present; it defines the actual field width.
423          */
424         MLX5_ASSERT(item->mask);
425         MLX5_ASSERT(field->size);
426         do {
427                 uint32_t size_b;
428                 uint32_t off_b;
429                 uint32_t mask;
430                 uint32_t data;
431                 bool next_field = true;
432                 bool next_dcopy = true;
433
434                 if (i >= MLX5_MAX_MODIFY_NUM)
435                         return rte_flow_error_set(error, EINVAL,
436                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
437                                  "too many items to modify");
438                 /* Fetch variable byte size mask from the array. */
439                 mask = flow_dv_fetch_field((const uint8_t *)item->mask +
440                                            field->offset, field->size);
441                 if (!mask) {
442                         ++field;
443                         continue;
444                 }
445                 /* Deduce actual data width in bits from mask value. */
446                 off_b = rte_bsf32(mask) + carry_b;
447                 size_b = sizeof(uint32_t) * CHAR_BIT -
448                          off_b - __builtin_clz(mask);
449                 MLX5_ASSERT(size_b);
450                 actions[i] = (struct mlx5_modification_cmd) {
451                         .action_type = type,
452                         .field = field->id,
453                         .offset = off_b,
454                         .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
455                                 0 : size_b,
456                 };
457                 if (type == MLX5_MODIFICATION_TYPE_COPY) {
458                         MLX5_ASSERT(dcopy);
459                         actions[i].dst_field = dcopy->id;
460                         actions[i].dst_offset =
461                                 (int)dcopy->offset < 0 ? off_b : dcopy->offset;
462                         /* Convert entire record to big-endian format. */
463                         actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
464                         /*
465                          * Destination field overflow. Copy leftovers of
466                          * a source field to the next destination field.
467                          */
468                         carry_b = 0;
469                         if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
470                             dcopy->size != 0) {
471                                 actions[i].length =
472                                         dcopy->size * CHAR_BIT - dcopy->offset;
473                                 carry_b = actions[i].length;
474                                 next_field = false;
475                         }
476                         /*
477                  * Not enough bits in a source field to fill a
478                          * destination field. Switch to the next source.
479                          */
480                         if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
481                             (size_b == field->size * CHAR_BIT - off_b)) {
482                                 actions[i].length =
483                                         field->size * CHAR_BIT - off_b;
484                                 dcopy->offset += actions[i].length;
485                                 next_dcopy = false;
486                         }
487                         if (next_dcopy)
488                                 ++dcopy;
489                 } else {
490                         MLX5_ASSERT(item->spec);
491                         data = flow_dv_fetch_field((const uint8_t *)item->spec +
492                                                    field->offset, field->size);
493                         /* Shift out the trailing masked bits from data. */
494                         data = (data & mask) >> off_b;
495                         actions[i].data1 = rte_cpu_to_be_32(data);
496                 }
497                 /* Convert entire record to expected big-endian format. */
498                 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
499                 if (next_field)
500                         ++field;
501                 ++i;
502         } while (field->size);
503         if (resource->actions_num == i)
504                 return rte_flow_error_set(error, EINVAL,
505                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
506                                           "invalid modification flow item");
507         resource->actions_num = i;
508         return 0;
509 }
510
511 /**
512  * Convert modify-header set IPv4 address action to DV specification.
513  *
514  * @param[in,out] resource
515  *   Pointer to the modify-header resource.
516  * @param[in] action
517  *   Pointer to action specification.
518  * @param[out] error
519  *   Pointer to the error structure.
520  *
521  * @return
522  *   0 on success, a negative errno value otherwise and rte_errno is set.
523  */
524 static int
525 flow_dv_convert_action_modify_ipv4
526                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
527                          const struct rte_flow_action *action,
528                          struct rte_flow_error *error)
529 {
530         const struct rte_flow_action_set_ipv4 *conf =
531                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
532         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
533         struct rte_flow_item_ipv4 ipv4;
534         struct rte_flow_item_ipv4 ipv4_mask;
535
536         memset(&ipv4, 0, sizeof(ipv4));
537         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
538         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
539                 ipv4.hdr.src_addr = conf->ipv4_addr;
540                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
541         } else {
542                 ipv4.hdr.dst_addr = conf->ipv4_addr;
543                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
544         }
545         item.spec = &ipv4;
546         item.mask = &ipv4_mask;
547         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
548                                              MLX5_MODIFICATION_TYPE_SET, error);
549 }
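/*
 * Sketch of the result (hypothetical action): for
 * RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC the mask selects only hdr.src_addr,
 * so flow_dv_convert_modify_action() emits a single
 * MLX5_MODIFICATION_TYPE_SET command on MLX5_MODI_OUT_SIPV4 with
 * offset 0 and length 0 (i.e. the full 32 bits).
 */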
550
551 /**
552  * Convert modify-header set IPv6 address action to DV specification.
553  *
554  * @param[in,out] resource
555  *   Pointer to the modify-header resource.
556  * @param[in] action
557  *   Pointer to action specification.
558  * @param[out] error
559  *   Pointer to the error structure.
560  *
561  * @return
562  *   0 on success, a negative errno value otherwise and rte_errno is set.
563  */
564 static int
565 flow_dv_convert_action_modify_ipv6
566                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
567                          const struct rte_flow_action *action,
568                          struct rte_flow_error *error)
569 {
570         const struct rte_flow_action_set_ipv6 *conf =
571                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
572         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
573         struct rte_flow_item_ipv6 ipv6;
574         struct rte_flow_item_ipv6 ipv6_mask;
575
576         memset(&ipv6, 0, sizeof(ipv6));
577         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
578         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
579                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
580                        sizeof(ipv6.hdr.src_addr));
581                 memcpy(&ipv6_mask.hdr.src_addr,
582                        &rte_flow_item_ipv6_mask.hdr.src_addr,
583                        sizeof(ipv6.hdr.src_addr));
584         } else {
585                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
586                        sizeof(ipv6.hdr.dst_addr));
587                 memcpy(&ipv6_mask.hdr.dst_addr,
588                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
589                        sizeof(ipv6.hdr.dst_addr));
590         }
591         item.spec = &ipv6;
592         item.mask = &ipv6_mask;
593         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
594                                              MLX5_MODIFICATION_TYPE_SET, error);
595 }
596
597 /**
598  * Convert modify-header set MAC address action to DV specification.
599  *
600  * @param[in,out] resource
601  *   Pointer to the modify-header resource.
602  * @param[in] action
603  *   Pointer to action specification.
604  * @param[out] error
605  *   Pointer to the error structure.
606  *
607  * @return
608  *   0 on success, a negative errno value otherwise and rte_errno is set.
609  */
610 static int
611 flow_dv_convert_action_modify_mac
612                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
613                          const struct rte_flow_action *action,
614                          struct rte_flow_error *error)
615 {
616         const struct rte_flow_action_set_mac *conf =
617                 (const struct rte_flow_action_set_mac *)(action->conf);
618         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
619         struct rte_flow_item_eth eth;
620         struct rte_flow_item_eth eth_mask;
621
622         memset(&eth, 0, sizeof(eth));
623         memset(&eth_mask, 0, sizeof(eth_mask));
624         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
625                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
626                        sizeof(eth.src.addr_bytes));
627                 memcpy(&eth_mask.src.addr_bytes,
628                        &rte_flow_item_eth_mask.src.addr_bytes,
629                        sizeof(eth_mask.src.addr_bytes));
630         } else {
631                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
632                        sizeof(eth.dst.addr_bytes));
633                 memcpy(&eth_mask.dst.addr_bytes,
634                        &rte_flow_item_eth_mask.dst.addr_bytes,
635                        sizeof(eth_mask.dst.addr_bytes));
636         }
637         item.spec = &eth;
638         item.mask = &eth_mask;
639         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
640                                              MLX5_MODIFICATION_TYPE_SET, error);
641 }
642
643 /**
644  * Convert modify-header set VLAN VID action to DV specification.
645  *
646  * @param[in,out] resource
647  *   Pointer to the modify-header resource.
648  * @param[in] action
649  *   Pointer to action specification.
650  * @param[out] error
651  *   Pointer to the error structure.
652  *
653  * @return
654  *   0 on success, a negative errno value otherwise and rte_errno is set.
655  */
656 static int
657 flow_dv_convert_action_modify_vlan_vid
658                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
659                          const struct rte_flow_action *action,
660                          struct rte_flow_error *error)
661 {
662         const struct rte_flow_action_of_set_vlan_vid *conf =
663                 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
664         int i = resource->actions_num;
665         struct mlx5_modification_cmd *actions = resource->actions;
666         struct field_modify_info *field = modify_vlan_out_first_vid;
667
668         if (i >= MLX5_MAX_MODIFY_NUM)
669                 return rte_flow_error_set(error, EINVAL,
670                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
671                          "too many items to modify");
672         actions[i] = (struct mlx5_modification_cmd) {
673                 .action_type = MLX5_MODIFICATION_TYPE_SET,
674                 .field = field->id,
675                 .length = field->size,
676                 .offset = field->offset,
677         };
678         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
679         actions[i].data1 = conf->vlan_vid;
680         actions[i].data1 = actions[i].data1 << 16;
681         resource->actions_num = ++i;
682         return 0;
683 }
684
685 /**
686  * Convert modify-header set TP action to DV specification.
687  *
688  * @param[in,out] resource
689  *   Pointer to the modify-header resource.
690  * @param[in] action
691  *   Pointer to action specification.
692  * @param[in] items
693  *   Pointer to rte_flow_item objects list.
694  * @param[in] attr
695  *   Pointer to flow attributes structure.
696  * @param[in] dev_flow
697  *   Pointer to the sub flow.
698  * @param[in] tunnel_decap
699  *   Whether action is after tunnel decapsulation.
700  * @param[out] error
701  *   Pointer to the error structure.
702  *
703  * @return
704  *   0 on success, a negative errno value otherwise and rte_errno is set.
705  */
706 static int
707 flow_dv_convert_action_modify_tp
708                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
709                          const struct rte_flow_action *action,
710                          const struct rte_flow_item *items,
711                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
712                          bool tunnel_decap, struct rte_flow_error *error)
713 {
714         const struct rte_flow_action_set_tp *conf =
715                 (const struct rte_flow_action_set_tp *)(action->conf);
716         struct rte_flow_item item;
717         struct rte_flow_item_udp udp;
718         struct rte_flow_item_udp udp_mask;
719         struct rte_flow_item_tcp tcp;
720         struct rte_flow_item_tcp tcp_mask;
721         struct field_modify_info *field;
722
723         if (!attr->valid)
724                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
725         if (attr->udp) {
726                 memset(&udp, 0, sizeof(udp));
727                 memset(&udp_mask, 0, sizeof(udp_mask));
728                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
729                         udp.hdr.src_port = conf->port;
730                         udp_mask.hdr.src_port =
731                                         rte_flow_item_udp_mask.hdr.src_port;
732                 } else {
733                         udp.hdr.dst_port = conf->port;
734                         udp_mask.hdr.dst_port =
735                                         rte_flow_item_udp_mask.hdr.dst_port;
736                 }
737                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
738                 item.spec = &udp;
739                 item.mask = &udp_mask;
740                 field = modify_udp;
741         } else {
742                 MLX5_ASSERT(attr->tcp);
743                 memset(&tcp, 0, sizeof(tcp));
744                 memset(&tcp_mask, 0, sizeof(tcp_mask));
745                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
746                         tcp.hdr.src_port = conf->port;
747                         tcp_mask.hdr.src_port =
748                                         rte_flow_item_tcp_mask.hdr.src_port;
749                 } else {
750                         tcp.hdr.dst_port = conf->port;
751                         tcp_mask.hdr.dst_port =
752                                         rte_flow_item_tcp_mask.hdr.dst_port;
753                 }
754                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
755                 item.spec = &tcp;
756                 item.mask = &tcp_mask;
757                 field = modify_tcp;
758         }
759         return flow_dv_convert_modify_action(&item, field, NULL, resource,
760                                              MLX5_MODIFICATION_TYPE_SET, error);
761 }
762
763 /**
764  * Convert modify-header set TTL action to DV specification.
765  *
766  * @param[in,out] resource
767  *   Pointer to the modify-header resource.
768  * @param[in] action
769  *   Pointer to action specification.
770  * @param[in] items
771  *   Pointer to rte_flow_item objects list.
772  * @param[in] attr
773  *   Pointer to flow attributes structure.
774  * @param[in] dev_flow
775  *   Pointer to the sub flow.
776  * @param[in] tunnel_decap
777  *   Whether action is after tunnel decapsulation.
778  * @param[out] error
779  *   Pointer to the error structure.
780  *
781  * @return
782  *   0 on success, a negative errno value otherwise and rte_errno is set.
783  */
784 static int
785 flow_dv_convert_action_modify_ttl
786                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
787                          const struct rte_flow_action *action,
788                          const struct rte_flow_item *items,
789                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
790                          bool tunnel_decap, struct rte_flow_error *error)
791 {
792         const struct rte_flow_action_set_ttl *conf =
793                 (const struct rte_flow_action_set_ttl *)(action->conf);
794         struct rte_flow_item item;
795         struct rte_flow_item_ipv4 ipv4;
796         struct rte_flow_item_ipv4 ipv4_mask;
797         struct rte_flow_item_ipv6 ipv6;
798         struct rte_flow_item_ipv6 ipv6_mask;
799         struct field_modify_info *field;
800
801         if (!attr->valid)
802                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
803         if (attr->ipv4) {
804                 memset(&ipv4, 0, sizeof(ipv4));
805                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
806                 ipv4.hdr.time_to_live = conf->ttl_value;
807                 ipv4_mask.hdr.time_to_live = 0xFF;
808                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
809                 item.spec = &ipv4;
810                 item.mask = &ipv4_mask;
811                 field = modify_ipv4;
812         } else {
813                 MLX5_ASSERT(attr->ipv6);
814                 memset(&ipv6, 0, sizeof(ipv6));
815                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
816                 ipv6.hdr.hop_limits = conf->ttl_value;
817                 ipv6_mask.hdr.hop_limits = 0xFF;
818                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
819                 item.spec = &ipv6;
820                 item.mask = &ipv6_mask;
821                 field = modify_ipv6;
822         }
823         return flow_dv_convert_modify_action(&item, field, NULL, resource,
824                                              MLX5_MODIFICATION_TYPE_SET, error);
825 }
826
827 /**
828  * Convert modify-header decrement TTL action to DV specification.
829  *
830  * @param[in,out] resource
831  *   Pointer to the modify-header resource.
832  * @param[in] action
833  *   Pointer to action specification.
834  * @param[in] items
835  *   Pointer to rte_flow_item objects list.
836  * @param[in] attr
837  *   Pointer to flow attributes structure.
838  * @param[in] dev_flow
839  *   Pointer to the sub flow.
840  * @param[in] tunnel_decap
841  *   Whether action is after tunnel decapsulation.
842  * @param[out] error
843  *   Pointer to the error structure.
844  *
845  * @return
846  *   0 on success, a negative errno value otherwise and rte_errno is set.
847  */
848 static int
849 flow_dv_convert_action_modify_dec_ttl
850                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
851                          const struct rte_flow_item *items,
852                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
853                          bool tunnel_decap, struct rte_flow_error *error)
854 {
855         struct rte_flow_item item;
856         struct rte_flow_item_ipv4 ipv4;
857         struct rte_flow_item_ipv4 ipv4_mask;
858         struct rte_flow_item_ipv6 ipv6;
859         struct rte_flow_item_ipv6 ipv6_mask;
860         struct field_modify_info *field;
861
862         if (!attr->valid)
863                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
864         if (attr->ipv4) {
865                 memset(&ipv4, 0, sizeof(ipv4));
866                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
867                 ipv4.hdr.time_to_live = 0xFF;
868                 ipv4_mask.hdr.time_to_live = 0xFF;
869                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
870                 item.spec = &ipv4;
871                 item.mask = &ipv4_mask;
872                 field = modify_ipv4;
873         } else {
874                 MLX5_ASSERT(attr->ipv6);
875                 memset(&ipv6, 0, sizeof(ipv6));
876                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
877                 ipv6.hdr.hop_limits = 0xFF;
878                 ipv6_mask.hdr.hop_limits = 0xFF;
879                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
880                 item.spec = &ipv6;
881                 item.mask = &ipv6_mask;
882                 field = modify_ipv6;
883         }
884         return flow_dv_convert_modify_action(&item, field, NULL, resource,
885                                              MLX5_MODIFICATION_TYPE_ADD, error);
886 }
887
888 /**
889  * Convert modify-header increment/decrement TCP Sequence number
890  * to DV specification.
891  *
892  * @param[in,out] resource
893  *   Pointer to the modify-header resource.
894  * @param[in] action
895  *   Pointer to action specification.
896  * @param[out] error
897  *   Pointer to the error structure.
898  *
899  * @return
900  *   0 on success, a negative errno value otherwise and rte_errno is set.
901  */
902 static int
903 flow_dv_convert_action_modify_tcp_seq
904                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
905                          const struct rte_flow_action *action,
906                          struct rte_flow_error *error)
907 {
908         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
909         uint64_t value = rte_be_to_cpu_32(*conf);
910         struct rte_flow_item item;
911         struct rte_flow_item_tcp tcp;
912         struct rte_flow_item_tcp tcp_mask;
913
914         memset(&tcp, 0, sizeof(tcp));
915         memset(&tcp_mask, 0, sizeof(tcp_mask));
916         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
917                 /*
918                  * The HW has no decrement operation, only increment.
919                  * To simulate decrementing Y by X using the increment
920                  * operation we add UINT32_MAX to Y X times; each addition
921                  * of UINT32_MAX decrements Y by 1 modulo 2^32.
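                 * For example, to decrement by X = 3 the code computes
                 * 3 * UINT32_MAX, and Y + 3 * UINT32_MAX == Y - 3 (mod 2^32).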
922                  */
923                 value *= UINT32_MAX;
924         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
925         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
926         item.type = RTE_FLOW_ITEM_TYPE_TCP;
927         item.spec = &tcp;
928         item.mask = &tcp_mask;
929         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
930                                              MLX5_MODIFICATION_TYPE_ADD, error);
931 }
932
933 /**
934  * Convert modify-header increment/decrement TCP Acknowledgment number
935  * to DV specification.
936  *
937  * @param[in,out] resource
938  *   Pointer to the modify-header resource.
939  * @param[in] action
940  *   Pointer to action specification.
941  * @param[out] error
942  *   Pointer to the error structure.
943  *
944  * @return
945  *   0 on success, a negative errno value otherwise and rte_errno is set.
946  */
947 static int
948 flow_dv_convert_action_modify_tcp_ack
949                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
950                          const struct rte_flow_action *action,
951                          struct rte_flow_error *error)
952 {
953         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
954         uint64_t value = rte_be_to_cpu_32(*conf);
955         struct rte_flow_item item;
956         struct rte_flow_item_tcp tcp;
957         struct rte_flow_item_tcp tcp_mask;
958
959         memset(&tcp, 0, sizeof(tcp));
960         memset(&tcp_mask, 0, sizeof(tcp_mask));
961         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
962                 /*
963                  * The HW has no decrement operation, only increment.
964                  * To simulate decrementing Y by X using the increment
965                  * operation we add UINT32_MAX to Y X times; each addition
966                  * of UINT32_MAX decrements Y by 1 modulo 2^32.
967                  */
968                 value *= UINT32_MAX;
969         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
970         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
971         item.type = RTE_FLOW_ITEM_TYPE_TCP;
972         item.spec = &tcp;
973         item.mask = &tcp_mask;
974         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
975                                              MLX5_MODIFICATION_TYPE_ADD, error);
976 }
977
978 static enum mlx5_modification_field reg_to_field[] = {
979         [REG_NON] = MLX5_MODI_OUT_NONE,
980         [REG_A] = MLX5_MODI_META_DATA_REG_A,
981         [REG_B] = MLX5_MODI_META_DATA_REG_B,
982         [REG_C_0] = MLX5_MODI_META_REG_C_0,
983         [REG_C_1] = MLX5_MODI_META_REG_C_1,
984         [REG_C_2] = MLX5_MODI_META_REG_C_2,
985         [REG_C_3] = MLX5_MODI_META_REG_C_3,
986         [REG_C_4] = MLX5_MODI_META_REG_C_4,
987         [REG_C_5] = MLX5_MODI_META_REG_C_5,
988         [REG_C_6] = MLX5_MODI_META_REG_C_6,
989         [REG_C_7] = MLX5_MODI_META_REG_C_7,
990 };
991
992 /**
993  * Convert register set to DV specification.
994  *
995  * @param[in,out] resource
996  *   Pointer to the modify-header resource.
997  * @param[in] action
998  *   Pointer to action specification.
999  * @param[out] error
1000  *   Pointer to the error structure.
1001  *
1002  * @return
1003  *   0 on success, a negative errno value otherwise and rte_errno is set.
1004  */
1005 static int
1006 flow_dv_convert_action_set_reg
1007                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1008                          const struct rte_flow_action *action,
1009                          struct rte_flow_error *error)
1010 {
1011         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
1012         struct mlx5_modification_cmd *actions = resource->actions;
1013         uint32_t i = resource->actions_num;
1014
1015         if (i >= MLX5_MAX_MODIFY_NUM)
1016                 return rte_flow_error_set(error, EINVAL,
1017                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1018                                           "too many items to modify");
1019         MLX5_ASSERT(conf->id != REG_NON);
1020         MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
1021         actions[i] = (struct mlx5_modification_cmd) {
1022                 .action_type = MLX5_MODIFICATION_TYPE_SET,
1023                 .field = reg_to_field[conf->id],
1024                 .offset = conf->offset,
1025                 .length = conf->length,
1026         };
1027         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1028         actions[i].data1 = rte_cpu_to_be_32(conf->data);
1029         ++i;
1030         resource->actions_num = i;
1031         return 0;
1032 }
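/*
 * Hypothetical example: a mlx5_rte_flow_action_set_tag with id = REG_C_2,
 * offset = 0, length = 0 and data = 0x1234 yields one
 * MLX5_MODIFICATION_TYPE_SET command on MLX5_MODI_META_REG_C_2; a zero
 * length denotes the full 32 bits, as in flow_dv_convert_modify_action().
 */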
1033
1034 /**
1035  * Convert SET_TAG action to DV specification.
1036  *
1037  * @param[in] dev
1038  *   Pointer to the rte_eth_dev structure.
1039  * @param[in,out] resource
1040  *   Pointer to the modify-header resource.
1041  * @param[in] conf
1042  *   Pointer to action specification.
1043  * @param[out] error
1044  *   Pointer to the error structure.
1045  *
1046  * @return
1047  *   0 on success, a negative errno value otherwise and rte_errno is set.
1048  */
1049 static int
1050 flow_dv_convert_action_set_tag
1051                         (struct rte_eth_dev *dev,
1052                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1053                          const struct rte_flow_action_set_tag *conf,
1054                          struct rte_flow_error *error)
1055 {
1056         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1057         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1058         struct rte_flow_item item = {
1059                 .spec = &data,
1060                 .mask = &mask,
1061         };
1062         struct field_modify_info reg_c_x[] = {
1063                 [1] = {0, 0, 0},
1064         };
1065         enum mlx5_modification_field reg_type;
1066         int ret;
1067
1068         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1069         if (ret < 0)
1070                 return ret;
1071         MLX5_ASSERT(ret != REG_NON);
1072         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1073         reg_type = reg_to_field[ret];
1074         MLX5_ASSERT(reg_type > 0);
1075         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1076         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1077                                              MLX5_MODIFICATION_TYPE_SET, error);
1078 }
1079
1080 /**
1081  * Convert internal COPY_REG action to DV specification.
1082  *
1083  * @param[in] dev
1084  *   Pointer to the rte_eth_dev structure.
1085  * @param[in,out] res
1086  *   Pointer to the modify-header resource.
1087  * @param[in] action
1088  *   Pointer to action specification.
1089  * @param[out] error
1090  *   Pointer to the error structure.
1091  *
1092  * @return
1093  *   0 on success, a negative errno value otherwise and rte_errno is set.
1094  */
1095 static int
1096 flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
1097                                  struct mlx5_flow_dv_modify_hdr_resource *res,
1098                                  const struct rte_flow_action *action,
1099                                  struct rte_flow_error *error)
1100 {
1101         const struct mlx5_flow_action_copy_mreg *conf = action->conf;
1102         rte_be32_t mask = RTE_BE32(UINT32_MAX);
1103         struct rte_flow_item item = {
1104                 .spec = NULL,
1105                 .mask = &mask,
1106         };
1107         struct field_modify_info reg_src[] = {
1108                 {4, 0, reg_to_field[conf->src]},
1109                 {0, 0, 0},
1110         };
1111         struct field_modify_info reg_dst = {
1112                 .offset = 0,
1113                 .id = reg_to_field[conf->dst],
1114         };
1115         /* Adjust reg_c[0] usage according to reported mask. */
1116         if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
1117                 struct mlx5_priv *priv = dev->data->dev_private;
1118                 uint32_t reg_c0 = priv->sh->dv_regc0_mask;
1119
1120                 MLX5_ASSERT(reg_c0);
1121                 MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
1122                 if (conf->dst == REG_C_0) {
1123                         /* Copy to reg_c[0], within mask only. */
1124                         reg_dst.offset = rte_bsf32(reg_c0);
1125                         /*
1126                          * The mask ignores the endianness because
1127                          * there is no conversion in the datapath.
1128                          */
1129 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1130                         /* Copy from destination lower bits to reg_c[0]. */
1131                         mask = reg_c0 >> reg_dst.offset;
1132 #else
1133                         /* Copy from destination upper bits to reg_c[0]. */
1134                         mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
1135                                           rte_fls_u32(reg_c0));
1136 #endif
1137                 } else {
1138                         mask = rte_cpu_to_be_32(reg_c0);
1139 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1140                         /* Copy from reg_c[0] to destination lower bits. */
1141                         reg_dst.offset = 0;
1142 #else
1143                         /* Copy from reg_c[0] to destination upper bits. */
1144                         reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
1145                                          (rte_fls_u32(reg_c0) -
1146                                           rte_bsf32(reg_c0));
1147 #endif
1148                 }
1149         }
1150         return flow_dv_convert_modify_action(&item,
1151                                              reg_src, &reg_dst, res,
1152                                              MLX5_MODIFICATION_TYPE_COPY,
1153                                              error);
1154 }
1155
1156 /**
1157  * Convert MARK action to DV specification. This routine is used
1158  * only in extensive metadata mode and requires a metadata register
1159  * to be handled. In legacy mode the hardware tag resource is engaged.
1160  *
1161  * @param[in] dev
1162  *   Pointer to the rte_eth_dev structure.
1163  * @param[in] conf
1164  *   Pointer to MARK action specification.
1165  * @param[in,out] resource
1166  *   Pointer to the modify-header resource.
1167  * @param[out] error
1168  *   Pointer to the error structure.
1169  *
1170  * @return
1171  *   0 on success, a negative errno value otherwise and rte_errno is set.
1172  */
1173 static int
1174 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1175                             const struct rte_flow_action_mark *conf,
1176                             struct mlx5_flow_dv_modify_hdr_resource *resource,
1177                             struct rte_flow_error *error)
1178 {
1179         struct mlx5_priv *priv = dev->data->dev_private;
1180         rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1181                                            priv->sh->dv_mark_mask);
1182         rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1183         struct rte_flow_item item = {
1184                 .spec = &data,
1185                 .mask = &mask,
1186         };
1187         struct field_modify_info reg_c_x[] = {
1188                 [1] = {0, 0, 0},
1189         };
1190         int reg;
1191
1192         if (!mask)
1193                 return rte_flow_error_set(error, EINVAL,
1194                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1195                                           NULL, "zero mark action mask");
1196         reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1197         if (reg < 0)
1198                 return reg;
1199         MLX5_ASSERT(reg > 0);
1200         if (reg == REG_C_0) {
1201                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1202                 uint32_t shl_c0 = rte_bsf32(msk_c0);
1203
1204                 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1205                 mask = rte_cpu_to_be_32(mask) & msk_c0;
1206                 mask = rte_cpu_to_be_32(mask << shl_c0);
1207         }
1208         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1209         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1210                                              MLX5_MODIFICATION_TYPE_SET, error);
1211 }
1212
1213 /**
1214  * Get metadata register index for specified steering domain.
1215  *
1216  * @param[in] dev
1217  *   Pointer to the rte_eth_dev structure.
1218  * @param[in] attr
1219  *   Attributes of flow to determine steering domain.
1220  * @param[out] error
1221  *   Pointer to the error structure.
1222  *
1223  * @return
1224  *   positive index on success, a negative errno value otherwise
1225  *   and rte_errno is set.
1226  */
1227 static enum modify_reg
1228 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1229                          const struct rte_flow_attr *attr,
1230                          struct rte_flow_error *error)
1231 {
1232         int reg =
1233                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1234                                           MLX5_METADATA_FDB :
1235                                             attr->egress ?
1236                                             MLX5_METADATA_TX :
1237                                             MLX5_METADATA_RX, 0, error);
1238         if (reg < 0)
1239                 return rte_flow_error_set(error,
1240                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1241                                           NULL, "unavailable "
1242                                           "metadata register");
1243         return reg;
1244 }
1245
1246 /**
1247  * Convert SET_META action to DV specification.
1248  *
1249  * @param[in] dev
1250  *   Pointer to the rte_eth_dev structure.
1251  * @param[in,out] resource
1252  *   Pointer to the modify-header resource.
1253  * @param[in] attr
1254  *   Attributes of flow that includes this item.
1255  * @param[in] conf
1256  *   Pointer to action specification.
1257  * @param[out] error
1258  *   Pointer to the error structure.
1259  *
1260  * @return
1261  *   0 on success, a negative errno value otherwise and rte_errno is set.
1262  */
1263 static int
1264 flow_dv_convert_action_set_meta
1265                         (struct rte_eth_dev *dev,
1266                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1267                          const struct rte_flow_attr *attr,
1268                          const struct rte_flow_action_set_meta *conf,
1269                          struct rte_flow_error *error)
1270 {
1271         uint32_t mask = rte_cpu_to_be_32(conf->mask);
1272         uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
1273         struct rte_flow_item item = {
1274                 .spec = &data,
1275                 .mask = &mask,
1276         };
1277         struct field_modify_info reg_c_x[] = {
1278                 [1] = {0, 0, 0},
1279         };
1280         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1281
1282         if (reg < 0)
1283                 return reg;
1284         MLX5_ASSERT(reg != REG_NON);
1285         if (reg == REG_C_0) {
1286                 struct mlx5_priv *priv = dev->data->dev_private;
1287                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1288                 uint32_t shl_c0 = rte_bsf32(msk_c0);
1289
1290                 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1291                 mask = rte_cpu_to_be_32(mask) & msk_c0;
1292                 mask = rte_cpu_to_be_32(mask << shl_c0);
1293         }
1294         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1295         /* The routine expects the parameters to be stored in big-endian format. */
1296         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1297                                              MLX5_MODIFICATION_TYPE_SET, error);
1298 }
1299
1300 /**
1301  * Convert modify-header set IPv4 DSCP action to DV specification.
1302  *
1303  * @param[in,out] resource
1304  *   Pointer to the modify-header resource.
1305  * @param[in] action
1306  *   Pointer to action specification.
1307  * @param[out] error
1308  *   Pointer to the error structure.
1309  *
1310  * @return
1311  *   0 on success, a negative errno value otherwise and rte_errno is set.
1312  */
1313 static int
1314 flow_dv_convert_action_modify_ipv4_dscp
1315                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1316                          const struct rte_flow_action *action,
1317                          struct rte_flow_error *error)
1318 {
1319         const struct rte_flow_action_set_dscp *conf =
1320                 (const struct rte_flow_action_set_dscp *)(action->conf);
1321         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1322         struct rte_flow_item_ipv4 ipv4;
1323         struct rte_flow_item_ipv4 ipv4_mask;
1324
1325         memset(&ipv4, 0, sizeof(ipv4));
1326         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1327         ipv4.hdr.type_of_service = conf->dscp;
1328         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1329         item.spec = &ipv4;
1330         item.mask = &ipv4_mask;
1331         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1332                                              MLX5_MODIFICATION_TYPE_SET, error);
1333 }
1334
1335 /**
1336  * Convert modify-header set IPv6 DSCP action to DV specification.
1337  *
1338  * @param[in,out] resource
1339  *   Pointer to the modify-header resource.
1340  * @param[in] action
1341  *   Pointer to action specification.
1342  * @param[out] error
1343  *   Pointer to the error structure.
1344  *
1345  * @return
1346  *   0 on success, a negative errno value otherwise and rte_errno is set.
1347  */
1348 static int
1349 flow_dv_convert_action_modify_ipv6_dscp
1350                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1351                          const struct rte_flow_action *action,
1352                          struct rte_flow_error *error)
1353 {
1354         const struct rte_flow_action_set_dscp *conf =
1355                 (const struct rte_flow_action_set_dscp *)(action->conf);
1356         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1357         struct rte_flow_item_ipv6 ipv6;
1358         struct rte_flow_item_ipv6 ipv6_mask;
1359
1360         memset(&ipv6, 0, sizeof(ipv6));
1361         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1362         /*
1363           * Although the DSCP field of IPv6 is not byte aligned, rdma-core
1364           * accepts the DSCP bits only when byte aligned at bits 0 to 5,
1365           * for compatibility with IPv4. Hence no shift is needed in the
1366           * IPv6 case; the byte-aligned 6-bit value is passed as-is.
1367          */
1368         ipv6.hdr.vtc_flow = conf->dscp;
1369         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1370         item.spec = &ipv6;
1371         item.mask = &ipv6_mask;
1372         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1373                                              MLX5_MODIFICATION_TYPE_SET, error);
1374 }
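
/*
 * Illustrative sketch, not part of the driver: assuming the standard
 * rte_ip.h mask definitions, both converters above reduce the
 * protocol-specific DSCP mask to the same 6-bit value aligned at bit 0,
 * which is what rdma-core expects:
 *
 *   RTE_IPV4_HDR_DSCP_MASK >> 2  == 0xfc >> 2         == 0x3f
 *   RTE_IPV6_HDR_DSCP_MASK >> 22 == 0x0fc00000 >> 22  == 0x3f
 *
 * The helper name below is hypothetical; a build-time check of that
 * equivalence could look like:
 */
static inline void
mlx5_dscp_mask_equivalence_sketch(void)
{
        /* Fails to compile if the two shifted masks ever diverge. */
        RTE_BUILD_BUG_ON((RTE_IPV4_HDR_DSCP_MASK >> 2) !=
                         (RTE_IPV6_HDR_DSCP_MASK >> 22));
}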
1375
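/**
 * Return the bit width of a generic MODIFY_FIELD field ID.
 *
 * @param[in] config
 *   Pointer to the device configuration (for dv_xmeta_en).
 * @param[in] field
 *   Generic rte_flow field ID.
 *
 * @return
 *   Field width in bits, 0 when not applicable.
 */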
1376 static int
1377 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1378                            enum rte_flow_field_id field)
1379 {
1380         switch (field) {
1381         case RTE_FLOW_FIELD_START:
1382                 return 32;
1383         case RTE_FLOW_FIELD_MAC_DST:
1384         case RTE_FLOW_FIELD_MAC_SRC:
1385                 return 48;
1386         case RTE_FLOW_FIELD_VLAN_TYPE:
1387                 return 16;
1388         case RTE_FLOW_FIELD_VLAN_ID:
1389                 return 12;
1390         case RTE_FLOW_FIELD_MAC_TYPE:
1391                 return 16;
1392         case RTE_FLOW_FIELD_IPV4_DSCP:
1393                 return 6;
1394         case RTE_FLOW_FIELD_IPV4_TTL:
1395                 return 8;
1396         case RTE_FLOW_FIELD_IPV4_SRC:
1397         case RTE_FLOW_FIELD_IPV4_DST:
1398                 return 32;
1399         case RTE_FLOW_FIELD_IPV6_DSCP:
1400                 return 6;
1401         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1402                 return 8;
1403         case RTE_FLOW_FIELD_IPV6_SRC:
1404         case RTE_FLOW_FIELD_IPV6_DST:
1405                 return 128;
1406         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1407         case RTE_FLOW_FIELD_TCP_PORT_DST:
1408                 return 16;
1409         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1410         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1411                 return 32;
1412         case RTE_FLOW_FIELD_TCP_FLAGS:
1413                 return 9;
1414         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1415         case RTE_FLOW_FIELD_UDP_PORT_DST:
1416                 return 16;
1417         case RTE_FLOW_FIELD_VXLAN_VNI:
1418         case RTE_FLOW_FIELD_GENEVE_VNI:
1419                 return 24;
1420         case RTE_FLOW_FIELD_GTP_TEID:
1421         case RTE_FLOW_FIELD_TAG:
1422                 return 32;
1423         case RTE_FLOW_FIELD_MARK:
1424                 return 24;
1425         case RTE_FLOW_FIELD_META:
1426                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1427                         return 16;
1428                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1429                         return 32;
1430                 else
1431                         return 0;
1432         case RTE_FLOW_FIELD_POINTER:
1433         case RTE_FLOW_FIELD_VALUE:
1434                 return 64;
1435         default:
1436                 MLX5_ASSERT(false);
1437         }
1438         return 0;
1439 }
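
/*
 * Illustrative sketch, not part of the driver: the width returned by
 * mlx5_flow_item_field_width() bounds the bit width a MODIFY_FIELD
 * action may operate on. The helper below is hypothetical and only
 * demonstrates the intended relation:
 */
static inline bool
mlx5_modify_field_width_fits_sketch(struct mlx5_dev_config *config,
                        const struct rte_flow_action_modify_field *conf)
{
        int dst = mlx5_flow_item_field_width(config, conf->dst.field);
        int src = mlx5_flow_item_field_width(config, conf->src.field);

        /* The requested width must fit the narrower of the two fields. */
        return (int)conf->width <= RTE_MIN(dst, src);
}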
1440
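/**
 * Fill the sub-field descriptors and masks of a modify-header action
 * for the given generic MODIFY_FIELD field ID.
 *
 * @param[in] data
 *   Pointer to the rte_flow field description (field ID, level, offset).
 * @param[out] info
 *   Array of sub-field descriptors to be filled.
 * @param[out] mask
 *   Array of big-endian sub-field masks to be filled, NULL when the
 *   field is a copy destination and no mask is applied.
 * @param[out] value
 *   Array of immediate values, filled for POINTER/VALUE sources.
 * @param[in] width
 *   Number of bits to be modified.
 * @param[in] dst_width
 *   Destination field width in bits.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[out] error
 *   Pointer to the error structure.
 */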
1441 static void
1442 mlx5_flow_field_id_to_modify_info
1443                 (const struct rte_flow_action_modify_data *data,
1444                  struct field_modify_info *info,
1445                  uint32_t *mask, uint32_t *value,
1446                  uint32_t width, uint32_t dst_width,
1447                  struct rte_eth_dev *dev,
1448                  const struct rte_flow_attr *attr,
1449                  struct rte_flow_error *error)
1450 {
1451         struct mlx5_priv *priv = dev->data->dev_private;
1452         struct mlx5_dev_config *config = &priv->config;
1453         uint32_t idx = 0;
1454         uint32_t off = 0;
1455         uint64_t val = 0;
1456         switch (data->field) {
1457         case RTE_FLOW_FIELD_START:
1458                 /* not supported yet */
1459                 MLX5_ASSERT(false);
1460                 break;
1461         case RTE_FLOW_FIELD_MAC_DST:
1462                 off = data->offset > 16 ? data->offset - 16 : 0;
1463                 if (mask) {
1464                         if (data->offset < 16) {
1465                                 info[idx] = (struct field_modify_info){2, 0,
1466                                                 MLX5_MODI_OUT_DMAC_15_0};
1467                                 if (width < 16) {
1468                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1469                                                                  (16 - width));
1470                                         width = 0;
1471                                 } else {
1472                                         mask[idx] = RTE_BE16(0xffff);
1473                                         width -= 16;
1474                                 }
1475                                 if (!width)
1476                                         break;
1477                                 ++idx;
1478                         }
1479                         info[idx] = (struct field_modify_info){4, 4 * idx,
1480                                                 MLX5_MODI_OUT_DMAC_47_16};
1481                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1482                                                       (32 - width)) << off);
1483                 } else {
1484                         if (data->offset < 16)
1485                                 info[idx++] = (struct field_modify_info){2, 0,
1486                                                 MLX5_MODI_OUT_DMAC_15_0};
1487                         info[idx] = (struct field_modify_info){4, off,
1488                                                 MLX5_MODI_OUT_DMAC_47_16};
1489                 }
1490                 break;
1491         case RTE_FLOW_FIELD_MAC_SRC:
1492                 off = data->offset > 16 ? data->offset - 16 : 0;
1493                 if (mask) {
1494                         if (data->offset < 16) {
1495                                 info[idx] = (struct field_modify_info){2, 0,
1496                                                 MLX5_MODI_OUT_SMAC_15_0};
1497                                 if (width < 16) {
1498                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1499                                                                  (16 - width));
1500                                         width = 0;
1501                                 } else {
1502                                         mask[idx] = RTE_BE16(0xffff);
1503                                         width -= 16;
1504                                 }
1505                                 if (!width)
1506                                         break;
1507                                 ++idx;
1508                         }
1509                         info[idx] = (struct field_modify_info){4, 4 * idx,
1510                                                 MLX5_MODI_OUT_SMAC_47_16};
1511                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1512                                                       (32 - width)) << off);
1513                 } else {
1514                         if (data->offset < 16)
1515                                 info[idx++] = (struct field_modify_info){2, 0,
1516                                                 MLX5_MODI_OUT_SMAC_15_0};
1517                         info[idx] = (struct field_modify_info){4, off,
1518                                                 MLX5_MODI_OUT_SMAC_47_16};
1519                 }
1520                 break;
1521         case RTE_FLOW_FIELD_VLAN_TYPE:
1522                 /* not supported yet */
1523                 break;
1524         case RTE_FLOW_FIELD_VLAN_ID:
1525                 info[idx] = (struct field_modify_info){2, 0,
1526                                         MLX5_MODI_OUT_FIRST_VID};
1527                 if (mask)
1528                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1529                 break;
1530         case RTE_FLOW_FIELD_MAC_TYPE:
1531                 info[idx] = (struct field_modify_info){2, 0,
1532                                         MLX5_MODI_OUT_ETHERTYPE};
1533                 if (mask)
1534                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1535                 break;
1536         case RTE_FLOW_FIELD_IPV4_DSCP:
1537                 info[idx] = (struct field_modify_info){1, 0,
1538                                         MLX5_MODI_OUT_IP_DSCP};
1539                 if (mask)
1540                         mask[idx] = 0x3f >> (6 - width);
1541                 break;
1542         case RTE_FLOW_FIELD_IPV4_TTL:
1543                 info[idx] = (struct field_modify_info){1, 0,
1544                                         MLX5_MODI_OUT_IPV4_TTL};
1545                 if (mask)
1546                         mask[idx] = 0xff >> (8 - width);
1547                 break;
1548         case RTE_FLOW_FIELD_IPV4_SRC:
1549                 info[idx] = (struct field_modify_info){4, 0,
1550                                         MLX5_MODI_OUT_SIPV4};
1551                 if (mask)
1552                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1553                                                      (32 - width));
1554                 break;
1555         case RTE_FLOW_FIELD_IPV4_DST:
1556                 info[idx] = (struct field_modify_info){4, 0,
1557                                         MLX5_MODI_OUT_DIPV4};
1558                 if (mask)
1559                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1560                                                      (32 - width));
1561                 break;
1562         case RTE_FLOW_FIELD_IPV6_DSCP:
1563                 info[idx] = (struct field_modify_info){1, 0,
1564                                         MLX5_MODI_OUT_IP_DSCP};
1565                 if (mask)
1566                         mask[idx] = 0x3f >> (6 - width);
1567                 break;
1568         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1569                 info[idx] = (struct field_modify_info){1, 0,
1570                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1571                 if (mask)
1572                         mask[idx] = 0xff >> (8 - width);
1573                 break;
1574         case RTE_FLOW_FIELD_IPV6_SRC:
1575                 if (mask) {
1576                         if (data->offset < 32) {
1577                                 info[idx] = (struct field_modify_info){4,
1578                                                 4 * idx,
1579                                                 MLX5_MODI_OUT_SIPV6_31_0};
1580                                 if (width < 32) {
1581                                         mask[idx] =
1582                                                 rte_cpu_to_be_32(0xffffffff >>
1583                                                                  (32 - width));
1584                                         width = 0;
1585                                 } else {
1586                                         mask[idx] = RTE_BE32(0xffffffff);
1587                                         width -= 32;
1588                                 }
1589                                 if (!width)
1590                                         break;
1591                                 ++idx;
1592                         }
1593                         if (data->offset < 64) {
1594                                 info[idx] = (struct field_modify_info){4,
1595                                                 4 * idx,
1596                                                 MLX5_MODI_OUT_SIPV6_63_32};
1597                                 if (width < 32) {
1598                                         mask[idx] =
1599                                                 rte_cpu_to_be_32(0xffffffff >>
1600                                                                  (32 - width));
1601                                         width = 0;
1602                                 } else {
1603                                         mask[idx] = RTE_BE32(0xffffffff);
1604                                         width -= 32;
1605                                 }
1606                                 if (!width)
1607                                         break;
1608                                 ++idx;
1609                         }
1610                         if (data->offset < 96) {
1611                                 info[idx] = (struct field_modify_info){4,
1612                                                 4 * idx,
1613                                                 MLX5_MODI_OUT_SIPV6_95_64};
1614                                 if (width < 32) {
1615                                         mask[idx] =
1616                                                 rte_cpu_to_be_32(0xffffffff >>
1617                                                                  (32 - width));
1618                                         width = 0;
1619                                 } else {
1620                                         mask[idx] = RTE_BE32(0xffffffff);
1621                                         width -= 32;
1622                                 }
1623                                 if (!width)
1624                                         break;
1625                                 ++idx;
1626                         }
1627                         info[idx] = (struct field_modify_info){4, 4 * idx,
1628                                                 MLX5_MODI_OUT_SIPV6_127_96};
1629                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1630                                                      (32 - width));
1631                 } else {
1632                         if (data->offset < 32)
1633                                 info[idx++] = (struct field_modify_info){4, 0,
1634                                                 MLX5_MODI_OUT_SIPV6_31_0};
1635                         if (data->offset < 64)
1636                                 info[idx++] = (struct field_modify_info){4, 0,
1637                                                 MLX5_MODI_OUT_SIPV6_63_32};
1638                         if (data->offset < 96)
1639                                 info[idx++] = (struct field_modify_info){4, 0,
1640                                                 MLX5_MODI_OUT_SIPV6_95_64};
1641                         if (data->offset < 128)
1642                                 info[idx++] = (struct field_modify_info){4, 0,
1643                                                 MLX5_MODI_OUT_SIPV6_127_96};
1644                 }
1645                 break;
1646         case RTE_FLOW_FIELD_IPV6_DST:
1647                 if (mask) {
1648                         if (data->offset < 32) {
1649                                 info[idx] = (struct field_modify_info){4,
1650                                                 4 * idx,
1651                                                 MLX5_MODI_OUT_DIPV6_31_0};
1652                                 if (width < 32) {
1653                                         mask[idx] =
1654                                                 rte_cpu_to_be_32(0xffffffff >>
1655                                                                  (32 - width));
1656                                         width = 0;
1657                                 } else {
1658                                         mask[idx] = RTE_BE32(0xffffffff);
1659                                         width -= 32;
1660                                 }
1661                                 if (!width)
1662                                         break;
1663                                 ++idx;
1664                         }
1665                         if (data->offset < 64) {
1666                                 info[idx] = (struct field_modify_info){4,
1667                                                 4 * idx,
1668                                                 MLX5_MODI_OUT_DIPV6_63_32};
1669                                 if (width < 32) {
1670                                         mask[idx] =
1671                                                 rte_cpu_to_be_32(0xffffffff >>
1672                                                                  (32 - width));
1673                                         width = 0;
1674                                 } else {
1675                                         mask[idx] = RTE_BE32(0xffffffff);
1676                                         width -= 32;
1677                                 }
1678                                 if (!width)
1679                                         break;
1680                                 ++idx;
1681                         }
1682                         if (data->offset < 96) {
1683                                 info[idx] = (struct field_modify_info){4,
1684                                                 4 * idx,
1685                                                 MLX5_MODI_OUT_DIPV6_95_64};
1686                                 if (width < 32) {
1687                                         mask[idx] =
1688                                                 rte_cpu_to_be_32(0xffffffff >>
1689                                                                  (32 - width));
1690                                         width = 0;
1691                                 } else {
1692                                         mask[idx] = RTE_BE32(0xffffffff);
1693                                         width -= 32;
1694                                 }
1695                                 if (!width)
1696                                         break;
1697                                 ++idx;
1698                         }
1699                         info[idx] = (struct field_modify_info){4, 4 * idx,
1700                                                 MLX5_MODI_OUT_DIPV6_127_96};
1701                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1702                                                      (32 - width));
1703                 } else {
1704                         if (data->offset < 32)
1705                                 info[idx++] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_31_0};
1707                         if (data->offset < 64)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_63_32};
1710                         if (data->offset < 96)
1711                                 info[idx++] = (struct field_modify_info){4, 0,
1712                                                 MLX5_MODI_OUT_DIPV6_95_64};
1713                         if (data->offset < 128)
1714                                 info[idx++] = (struct field_modify_info){4, 0,
1715                                                 MLX5_MODI_OUT_DIPV6_127_96};
1716                 }
1717                 break;
1718         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1719                 info[idx] = (struct field_modify_info){2, 0,
1720                                         MLX5_MODI_OUT_TCP_SPORT};
1721                 if (mask)
1722                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1723                 break;
1724         case RTE_FLOW_FIELD_TCP_PORT_DST:
1725                 info[idx] = (struct field_modify_info){2, 0,
1726                                         MLX5_MODI_OUT_TCP_DPORT};
1727                 if (mask)
1728                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1729                 break;
1730         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1731                 info[idx] = (struct field_modify_info){4, 0,
1732                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1733                 if (mask)
1734                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1735                                                      (32 - width));
1736                 break;
1737         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1738                 info[idx] = (struct field_modify_info){4, 0,
1739                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1740                 if (mask)
1741                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1742                                                      (32 - width));
1743                 break;
1744         case RTE_FLOW_FIELD_TCP_FLAGS:
1745                 info[idx] = (struct field_modify_info){2, 0,
1746                                         MLX5_MODI_OUT_TCP_FLAGS};
1747                 if (mask)
1748                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1749                 break;
1750         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1751                 info[idx] = (struct field_modify_info){2, 0,
1752                                         MLX5_MODI_OUT_UDP_SPORT};
1753                 if (mask)
1754                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1755                 break;
1756         case RTE_FLOW_FIELD_UDP_PORT_DST:
1757                 info[idx] = (struct field_modify_info){2, 0,
1758                                         MLX5_MODI_OUT_UDP_DPORT};
1759                 if (mask)
1760                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1761                 break;
1762         case RTE_FLOW_FIELD_VXLAN_VNI:
1763                 /* not supported yet */
1764                 break;
1765         case RTE_FLOW_FIELD_GENEVE_VNI:
1766                 /* not supported yet */
1767                 break;
1768         case RTE_FLOW_FIELD_GTP_TEID:
1769                 info[idx] = (struct field_modify_info){4, 0,
1770                                         MLX5_MODI_GTP_TEID};
1771                 if (mask)
1772                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1773                                                      (32 - width));
1774                 break;
1775         case RTE_FLOW_FIELD_TAG:
1776                 {
1777                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1778                                                    data->level, error);
1779                         if (reg < 0)
1780                                 return;
1781                         MLX5_ASSERT(reg != REG_NON);
1782                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1783                         info[idx] = (struct field_modify_info){4, 0,
1784                                                 reg_to_field[reg]};
1785                         if (mask)
1786                                 mask[idx] =
1787                                         rte_cpu_to_be_32(0xffffffff >>
1788                                                          (32 - width));
1789                 }
1790                 break;
1791         case RTE_FLOW_FIELD_MARK:
1792                 {
1793                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1794                                                        0, error);
1795                         if (reg < 0)
1796                                 return;
1797                         MLX5_ASSERT(reg != REG_NON);
1798                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1799                         info[idx] = (struct field_modify_info){4, 0,
1800                                                 reg_to_field[reg]};
1801                         if (mask)
1802                                 mask[idx] =
1803                                         rte_cpu_to_be_32(0xffffffff >>
1804                                                          (32 - width));
1805                 }
1806                 break;
1807         case RTE_FLOW_FIELD_META:
1808                 {
1809                         unsigned int xmeta = config->dv_xmeta_en;
1810                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1811                         if (reg < 0)
1812                                 return;
1813                         MLX5_ASSERT(reg != REG_NON);
1814                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1815                         if (xmeta == MLX5_XMETA_MODE_META16) {
1816                                 info[idx] = (struct field_modify_info){2, 0,
1817                                                         reg_to_field[reg]};
1818                                 if (mask)
1819                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1820                                                                 (16 - width));
1821                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1822                                 info[idx] = (struct field_modify_info){4, 0,
1823                                                         reg_to_field[reg]};
1824                                 if (mask)
1825                                         mask[idx] =
1826                                                 rte_cpu_to_be_32(0xffffffff >>
1827                                                                 (32 - width));
1828                         } else {
1829                                 MLX5_ASSERT(false);
1830                         }
1831                 }
1832                 break;
1833         case RTE_FLOW_FIELD_POINTER:
1834         case RTE_FLOW_FIELD_VALUE:
1835                 if (data->field == RTE_FLOW_FIELD_POINTER)
1836                         memcpy(&val, (void *)(uintptr_t)data->value,
1837                                sizeof(uint64_t));
1838                 else
1839                         val = data->value;
1840                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1841                         if (mask[idx]) {
1842                                 if (dst_width == 48) {
1843                                         /* Special case for MAC addresses. */
1844                                         value[idx] = rte_cpu_to_be_16(val);
1845                                         val >>= 16;
1846                                         dst_width -= 16;
1847                                 } else if (dst_width > 16) {
1848                                         value[idx] = rte_cpu_to_be_32(val);
1849                                         val >>= 32;
1850                                 } else if (dst_width > 8) {
1851                                         value[idx] = rte_cpu_to_be_16(val);
1852                                         val >>= 16;
1853                                 } else {
1854                                         value[idx] = (uint8_t)val;
1855                                         val >>= 8;
1856                                 }
1857                                 if (!val)
1858                                         break;
1859                         }
1860                 }
1861                 break;
1862         default:
1863                 MLX5_ASSERT(false);
1864                 break;
1865         }
1866 }
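
/*
 * Illustrative sketch, not part of the driver: for a full-width MAC_DST
 * destination the routine above splits the 48-bit field into two
 * sub-fields, bytes 4..5 via MLX5_MODI_OUT_DMAC_15_0 and bytes 0..3 via
 * MLX5_MODI_OUT_DMAC_47_16. The helper name is hypothetical:
 */
static inline void
mlx5_mac_dst_modify_info_sketch(struct rte_eth_dev *dev,
                                const struct rte_flow_attr *attr,
                                struct rte_flow_error *error)
{
        struct field_modify_info info[MLX5_ACT_MAX_MOD_FIELDS] = { {0, 0, 0} };
        uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
        uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
        const struct rte_flow_action_modify_data dst = {
                .field = RTE_FLOW_FIELD_MAC_DST,
                .level = 0,
                .offset = 0,
        };

        mlx5_flow_field_id_to_modify_info(&dst, info, mask, value,
                                          48, 48, dev, attr, error);
        /*
         * Expected result:
         *   info[0] = {2, 0, MLX5_MODI_OUT_DMAC_15_0},  mask[0] = BE16 0xffff;
         *   info[1] = {4, 4, MLX5_MODI_OUT_DMAC_47_16}, mask[1] = BE32 0xffffffff.
         */
}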
1867
1868 /**
1869  * Convert modify_field action to DV specification.
1870  *
1871  * @param[in] dev
1872  *   Pointer to the rte_eth_dev structure.
1873  * @param[in,out] resource
1874  *   Pointer to the modify-header resource.
1875  * @param[in] action
1876  *   Pointer to action specification.
1877  * @param[in] attr
1878  *   Attributes of flow that includes this item.
1879  * @param[out] error
1880  *   Pointer to the error structure.
1881  *
1882  * @return
1883  *   0 on success, a negative errno value otherwise and rte_errno is set.
1884  */
1885 static int
1886 flow_dv_convert_action_modify_field
1887                         (struct rte_eth_dev *dev,
1888                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1889                          const struct rte_flow_action *action,
1890                          const struct rte_flow_attr *attr,
1891                          struct rte_flow_error *error)
1892 {
1893         struct mlx5_priv *priv = dev->data->dev_private;
1894         struct mlx5_dev_config *config = &priv->config;
1895         const struct rte_flow_action_modify_field *conf =
1896                 (const struct rte_flow_action_modify_field *)(action->conf);
1897         struct rte_flow_item item;
1898         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1899                                                                 {0, 0, 0} };
1900         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1901                                                                 {0, 0, 0} };
1902         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1903         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1904         uint32_t type;
1905         uint32_t dst_width = mlx5_flow_item_field_width(config,
1906                                                         conf->dst.field);
1907
1908         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1909                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1910                 type = MLX5_MODIFICATION_TYPE_SET;
1911                 /* For SET, fill the destination field (field) first. */
1912                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1913                         value, conf->width, dst_width, dev, attr, error);
1914                 /* Then copy the immediate value from the source per the mask. */
1915                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1916                         value, conf->width, dst_width, dev, attr, error);
1917                 item.spec = &value;
1918         } else {
1919                 type = MLX5_MODIFICATION_TYPE_COPY;
1920                 /* For COPY, fill the destination field (dcopy) without a mask. */
1921                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1922                         value, conf->width, dst_width, dev, attr, error);
1923                 /* Then construct the source field (field) with its mask. */
1924                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1925                         value, conf->width, dst_width, dev, attr, error);
1926         }
1927         item.mask = &mask;
1928         return flow_dv_convert_modify_action(&item,
1929                         field, dcopy, resource, type, error);
1930 }
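
/*
 * Illustrative application-side sketch, not part of the driver: a
 * MODIFY_FIELD action that the converter above turns into a SET of the
 * 8-bit IPv4 TTL from an immediate value. The variable name is
 * hypothetical:
 */
static const struct rte_flow_action_modify_field mlx5_set_ttl_sketch __rte_unused = {
        .operation = RTE_FLOW_MODIFY_SET,
        .dst = {
                .field = RTE_FLOW_FIELD_IPV4_TTL,
                .level = 0,
                .offset = 0,
        },
        .src = {
                .field = RTE_FLOW_FIELD_VALUE,
                .value = 64, /* The new TTL. */
        },
        .width = 8, /* Full field width per mlx5_flow_item_field_width(). */
};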
1931
1932 /**
1933  * Validate MARK item.
1934  *
1935  * @param[in] dev
1936  *   Pointer to the rte_eth_dev structure.
1937  * @param[in] item
1938  *   Item specification.
1939  * @param[in] attr
1940  *   Attributes of flow that includes this item.
1941  * @param[out] error
1942  *   Pointer to error structure.
1943  *
1944  * @return
1945  *   0 on success, a negative errno value otherwise and rte_errno is set.
1946  */
1947 static int
1948 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1949                            const struct rte_flow_item *item,
1950                            const struct rte_flow_attr *attr __rte_unused,
1951                            struct rte_flow_error *error)
1952 {
1953         struct mlx5_priv *priv = dev->data->dev_private;
1954         struct mlx5_dev_config *config = &priv->config;
1955         const struct rte_flow_item_mark *spec = item->spec;
1956         const struct rte_flow_item_mark *mask = item->mask;
1957         const struct rte_flow_item_mark nic_mask = {
1958                 .id = priv->sh->dv_mark_mask,
1959         };
1960         int ret;
1961
1962         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1963                 return rte_flow_error_set(error, ENOTSUP,
1964                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1965                                           "extended metadata feature"
1966                                           " isn't enabled");
1967         if (!mlx5_flow_ext_mreg_supported(dev))
1968                 return rte_flow_error_set(error, ENOTSUP,
1969                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1970                                           "extended metadata register"
1971                                           " isn't supported");
1972         if (!nic_mask.id)
1973                 return rte_flow_error_set(error, ENOTSUP,
1974                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1975                                           "extended metadata register"
1976                                           " isn't available");
1977         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1978         if (ret < 0)
1979                 return ret;
1980         if (!spec)
1981                 return rte_flow_error_set(error, EINVAL,
1982                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1983                                           item->spec,
1984                                           "data cannot be empty");
1985         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1986                 return rte_flow_error_set(error, EINVAL,
1987                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1988                                           &spec->id,
1989                                           "mark id exceeds the limit");
1990         if (!mask)
1991                 mask = &nic_mask;
1992         if (!mask->id)
1993                 return rte_flow_error_set(error, EINVAL,
1994                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1995                                         "mask cannot be zero");
1996
1997         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1998                                         (const uint8_t *)&nic_mask,
1999                                         sizeof(struct rte_flow_item_mark),
2000                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2001         if (ret < 0)
2002                 return ret;
2003         return 0;
2004 }
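
/*
 * Illustrative sketch, not part of the driver: a MARK pattern item that
 * satisfies the checks above (non-empty spec, id below MLX5_FLOW_MARK_MAX,
 * non-zero mask intersecting priv->sh->dv_mark_mask). Names are
 * hypothetical:
 */
static const struct rte_flow_item_mark mlx5_mark_spec_sketch __rte_unused = {
        .id = 42,
};
static const struct rte_flow_item_mark mlx5_mark_mask_sketch __rte_unused = {
        .id = 0xffffff,
};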
2005
2006 /**
2007  * Validate META item.
2008  *
2009  * @param[in] dev
2010  *   Pointer to the rte_eth_dev structure.
2011  * @param[in] item
2012  *   Item specification.
2013  * @param[in] attr
2014  *   Attributes of flow that includes this item.
2015  * @param[out] error
2016  *   Pointer to error structure.
2017  *
2018  * @return
2019  *   0 on success, a negative errno value otherwise and rte_errno is set.
2020  */
2021 static int
2022 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
2023                            const struct rte_flow_item *item,
2024                            const struct rte_flow_attr *attr,
2025                            struct rte_flow_error *error)
2026 {
2027         struct mlx5_priv *priv = dev->data->dev_private;
2028         struct mlx5_dev_config *config = &priv->config;
2029         const struct rte_flow_item_meta *spec = item->spec;
2030         const struct rte_flow_item_meta *mask = item->mask;
2031         struct rte_flow_item_meta nic_mask = {
2032                 .data = UINT32_MAX
2033         };
2034         int reg;
2035         int ret;
2036
2037         if (!spec)
2038                 return rte_flow_error_set(error, EINVAL,
2039                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2040                                           item->spec,
2041                                           "data cannot be empty");
2042         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2043                 if (!mlx5_flow_ext_mreg_supported(dev))
2044                         return rte_flow_error_set(error, ENOTSUP,
2045                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2046                                           "extended metadata register"
2047                                           " isn't supported");
2048                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2049                 if (reg < 0)
2050                         return reg;
2051                 if (reg == REG_NON)
2052                         return rte_flow_error_set(error, ENOTSUP,
2053                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2054                                         "unavailable extended metadata register");
2055                 if (reg == REG_B)
2056                         return rte_flow_error_set(error, ENOTSUP,
2057                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2058                                           "match on reg_b "
2059                                           "isn't supported");
2060                 if (reg != REG_A)
2061                         nic_mask.data = priv->sh->dv_meta_mask;
2062         } else {
2063                 if (attr->transfer)
2064                         return rte_flow_error_set(error, ENOTSUP,
2065                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2066                                         "extended metadata feature "
2067                                         "should be enabled when "
2068                                         "meta item is requested "
2069                                         "with E-Switch mode");
2070                 if (attr->ingress)
2071                         return rte_flow_error_set(error, ENOTSUP,
2072                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2073                                         "match on metadata for ingress "
2074                                         "is not supported in legacy "
2075                                         "metadata mode");
2076         }
2077         if (!mask)
2078                 mask = &rte_flow_item_meta_mask;
2079         if (!mask->data)
2080                 return rte_flow_error_set(error, EINVAL,
2081                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2082                                         "mask cannot be zero");
2083
2084         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2085                                         (const uint8_t *)&nic_mask,
2086                                         sizeof(struct rte_flow_item_meta),
2087                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2088         return ret;
2089 }
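
/*
 * Illustrative sketch, not part of the driver: a META pattern item that
 * passes the validation above when extended metadata is enabled
 * (dv_xmeta_en != MLX5_XMETA_MODE_LEGACY). Names are hypothetical:
 */
static const struct rte_flow_item_meta mlx5_meta_spec_sketch __rte_unused = {
        .data = 0xcafe,
};
static const struct rte_flow_item_meta mlx5_meta_mask_sketch __rte_unused = {
        .data = UINT32_MAX, /* Trimmed to priv->sh->dv_meta_mask off REG_A. */
};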
2090
2091 /**
2092  * Validate TAG item.
2093  *
2094  * @param[in] dev
2095  *   Pointer to the rte_eth_dev structure.
2096  * @param[in] item
2097  *   Item specification.
2098  * @param[in] attr
2099  *   Attributes of flow that includes this item.
2100  * @param[out] error
2101  *   Pointer to error structure.
2102  *
2103  * @return
2104  *   0 on success, a negative errno value otherwise and rte_errno is set.
2105  */
2106 static int
2107 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2108                           const struct rte_flow_item *item,
2109                           const struct rte_flow_attr *attr __rte_unused,
2110                           struct rte_flow_error *error)
2111 {
2112         const struct rte_flow_item_tag *spec = item->spec;
2113         const struct rte_flow_item_tag *mask = item->mask;
2114         const struct rte_flow_item_tag nic_mask = {
2115                 .data = RTE_BE32(UINT32_MAX),
2116                 .index = 0xff,
2117         };
2118         int ret;
2119
2120         if (!mlx5_flow_ext_mreg_supported(dev))
2121                 return rte_flow_error_set(error, ENOTSUP,
2122                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2123                                           "extended metadata register"
2124                                           " isn't supported");
2125         if (!spec)
2126                 return rte_flow_error_set(error, EINVAL,
2127                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2128                                           item->spec,
2129                                           "data cannot be empty");
2130         if (!mask)
2131                 mask = &rte_flow_item_tag_mask;
2132         if (!mask->data)
2133                 return rte_flow_error_set(error, EINVAL,
2134                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2135                                         "mask cannot be zero");
2136
2137         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2138                                         (const uint8_t *)&nic_mask,
2139                                         sizeof(struct rte_flow_item_tag),
2140                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2141         if (ret < 0)
2142                 return ret;
2143         if (mask->index != 0xff)
2144                 return rte_flow_error_set(error, EINVAL,
2145                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2146                                           "partial mask for tag index"
2147                                           " is not supported");
2148         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2149         if (ret < 0)
2150                 return ret;
2151         MLX5_ASSERT(ret != REG_NON);
2152         return 0;
2153 }
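
/*
 * Illustrative sketch, not part of the driver: a TAG pattern item as
 * accepted above. Note the index mask must be the full 0xff. Names are
 * hypothetical:
 */
static const struct rte_flow_item_tag mlx5_tag_spec_sketch __rte_unused = {
        .data = 0x1234,
        .index = 0, /* Application tag index, mapped to a REG_C_x register. */
};
static const struct rte_flow_item_tag mlx5_tag_mask_sketch __rte_unused = {
        .data = UINT32_MAX,
        .index = 0xff, /* Partial index masks are rejected. */
};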
2154
2155 /**
2156  * Validate port_id item.
2157  *
2158  * @param[in] dev
2159  *   Pointer to the rte_eth_dev structure.
2160  * @param[in] item
2161  *   Item specification.
2162  * @param[in] attr
2163  *   Attributes of flow that includes this item.
2164  * @param[in] item_flags
2165  *   Bit-fields that holds the items detected until now.
2166  * @param[out] error
2167  *   Pointer to error structure.
2168  *
2169  * @return
2170  *   0 on success, a negative errno value otherwise and rte_errno is set.
2171  */
2172 static int
2173 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2174                               const struct rte_flow_item *item,
2175                               const struct rte_flow_attr *attr,
2176                               uint64_t item_flags,
2177                               struct rte_flow_error *error)
2178 {
2179         const struct rte_flow_item_port_id *spec = item->spec;
2180         const struct rte_flow_item_port_id *mask = item->mask;
2181         const struct rte_flow_item_port_id switch_mask = {
2182                         .id = 0xffffffff,
2183         };
2184         struct mlx5_priv *esw_priv;
2185         struct mlx5_priv *dev_priv;
2186         int ret;
2187
2188         if (!attr->transfer)
2189                 return rte_flow_error_set(error, EINVAL,
2190                                           RTE_FLOW_ERROR_TYPE_ITEM,
2191                                           NULL,
2192                                           "match on port id is valid only"
2193                                           " when transfer flag is enabled");
2194         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2195                 return rte_flow_error_set(error, ENOTSUP,
2196                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2197                                           "multiple source ports are not"
2198                                           " supported");
2199         if (!mask)
2200                 mask = &switch_mask;
2201         if (mask->id != 0xffffffff)
2202                 return rte_flow_error_set(error, ENOTSUP,
2203                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2204                                            mask,
2205                                            "no support for partial mask on"
2206                                            " \"id\" field");
2207         ret = mlx5_flow_item_acceptable
2208                                 (item, (const uint8_t *)mask,
2209                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2210                                  sizeof(struct rte_flow_item_port_id),
2211                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2212         if (ret)
2213                 return ret;
2214         if (!spec)
2215                 return 0;
2216         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2217         if (!esw_priv)
2218                 return rte_flow_error_set(error, rte_errno,
2219                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2220                                           "failed to obtain E-Switch info for"
2221                                           " port");
2222         dev_priv = mlx5_dev_to_eswitch_info(dev);
2223         if (!dev_priv)
2224                 return rte_flow_error_set(error, rte_errno,
2225                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2226                                           NULL,
2227                                           "failed to obtain E-Switch info");
2228         if (esw_priv->domain_id != dev_priv->domain_id)
2229                 return rte_flow_error_set(error, EINVAL,
2230                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2231                                           "cannot match on a port from a"
2232                                           " different E-Switch");
2233         return 0;
2234 }
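
/*
 * Illustrative sketch, not part of the driver: a PORT_ID pattern item as
 * accepted above. It is valid only with attr->transfer set, a full
 * 0xffffffff "id" mask, and a port on the same E-Switch domain. The name
 * is hypothetical:
 */
static const struct rte_flow_item_port_id mlx5_port_id_spec_sketch __rte_unused = {
        .id = 1, /* DPDK ethdev port ID of the matched source port. */
};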
2235
2236 /**
2237  * Validate VLAN item.
2238  *
2239  * @param[in] item
2240  *   Item specification.
2241  * @param[in] item_flags
2242  *   Bit-fields that holds the items detected until now.
2243  * @param[in] dev
2244  *   Ethernet device flow is being created on.
2245  * @param[out] error
2246  *   Pointer to error structure.
2247  *
2248  * @return
2249  *   0 on success, a negative errno value otherwise and rte_errno is set.
2250  */
2251 static int
2252 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2253                            uint64_t item_flags,
2254                            struct rte_eth_dev *dev,
2255                            struct rte_flow_error *error)
2256 {
2257         const struct rte_flow_item_vlan *mask = item->mask;
2258         const struct rte_flow_item_vlan nic_mask = {
2259                 .tci = RTE_BE16(UINT16_MAX),
2260                 .inner_type = RTE_BE16(UINT16_MAX),
2261                 .has_more_vlan = 1,
2262         };
2263         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2264         int ret;
2265         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2266                                         MLX5_FLOW_LAYER_INNER_L4) :
2267                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2268                                         MLX5_FLOW_LAYER_OUTER_L4);
2269         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2270                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2271
2272         if (item_flags & vlanm)
2273                 return rte_flow_error_set(error, EINVAL,
2274                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2275                                           "multiple VLAN layers not supported");
2276         else if ((item_flags & l34m) != 0)
2277                 return rte_flow_error_set(error, EINVAL,
2278                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2279                                           "VLAN cannot follow L3/L4 layer");
2280         if (!mask)
2281                 mask = &rte_flow_item_vlan_mask;
2282         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2283                                         (const uint8_t *)&nic_mask,
2284                                         sizeof(struct rte_flow_item_vlan),
2285                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2286         if (ret)
2287                 return ret;
2288         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2289                 struct mlx5_priv *priv = dev->data->dev_private;
2290
2291                 if (priv->vmwa_context) {
2292                         /*
2293                          * Non-NULL context means we have a virtual machine
2294                          * and SR-IOV enabled, we have to create VLAN interface
2295                          * to make hypervisor to setup E-Switch vport
2296                          * context correctly. We avoid creating the multiple
2297                          * VLAN interfaces, so we cannot support VLAN tag mask.
2298                          */
2299                         return rte_flow_error_set(error, EINVAL,
2300                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2301                                                   item,
2302                                                   "VLAN tag mask is not"
2303                                                   " supported in virtual"
2304                                                   " environment");
2305                 }
2306         }
2307         return 0;
2308 }
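
/*
 * Illustrative sketch, not part of the driver: a VLAN pattern item that
 * passes the validation above. A VID-only TCI mask (0x0fff) also avoids
 * the restriction for SR-IOV virtual environments described above. Names
 * are hypothetical:
 */
static const struct rte_flow_item_vlan mlx5_vlan_spec_sketch __rte_unused = {
        .tci = RTE_BE16(100), /* VID 100, PCP and DEI zero. */
};
static const struct rte_flow_item_vlan mlx5_vlan_mask_sketch __rte_unused = {
        .tci = RTE_BE16(0x0fff), /* Match the VID bits only. */
};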
2309
2310 /*
2311  * GTP flags are contained in 1 byte of the format:
2312  * -------------------------------------------
2313  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2314  * |-----------------------------------------|
2315  * | value | Version | PT | Res | E | S | PN |
2316  * -------------------------------------------
2317  *
2318  * Matching is supported only for GTP flags E, S, PN.
2319  */
2320 #define MLX5_GTP_FLAGS_MASK     0x07
2321
2322 /**
2323  * Validate GTP item.
2324  *
2325  * @param[in] dev
2326  *   Pointer to the rte_eth_dev structure.
2327  * @param[in] item
2328  *   Item specification.
2329  * @param[in] item_flags
2330  *   Bit-fields that holds the items detected until now.
2331  * @param[out] error
2332  *   Pointer to error structure.
2333  *
2334  * @return
2335  *   0 on success, a negative errno value otherwise and rte_errno is set.
2336  */
2337 static int
2338 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2339                           const struct rte_flow_item *item,
2340                           uint64_t item_flags,
2341                           struct rte_flow_error *error)
2342 {
2343         struct mlx5_priv *priv = dev->data->dev_private;
2344         const struct rte_flow_item_gtp *spec = item->spec;
2345         const struct rte_flow_item_gtp *mask = item->mask;
2346         const struct rte_flow_item_gtp nic_mask = {
2347                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2348                 .msg_type = 0xff,
2349                 .teid = RTE_BE32(0xffffffff),
2350         };
2351
2352         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2353                 return rte_flow_error_set(error, ENOTSUP,
2354                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2355                                           "GTP support is not enabled");
2356         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2357                 return rte_flow_error_set(error, ENOTSUP,
2358                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2359                                           "multiple tunnel layers not"
2360                                           " supported");
2361         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2362                 return rte_flow_error_set(error, EINVAL,
2363                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                                           "no outer UDP layer found");
2365         if (!mask)
2366                 mask = &rte_flow_item_gtp_mask;
2367         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2368                 return rte_flow_error_set(error, ENOTSUP,
2369                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2370                                           "Match is supported for GTP"
2371                                           " flags only");
2372         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2373                                          (const uint8_t *)&nic_mask,
2374                                          sizeof(struct rte_flow_item_gtp),
2375                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2376 }
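
/*
 * Illustrative sketch, not part of the driver: a GTP pattern item as
 * accepted above. It must follow an outer UDP item and may match flags
 * only within MLX5_GTP_FLAGS_MASK. Names are hypothetical:
 */
static const struct rte_flow_item_gtp mlx5_gtp_spec_sketch __rte_unused = {
        .v_pt_rsv_flags = 0x02, /* E=0, S=1, PN=0. */
        .msg_type = 0xff, /* G-PDU. */
        .teid = RTE_BE32(0x1234),
};
static const struct rte_flow_item_gtp mlx5_gtp_mask_sketch __rte_unused = {
        .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
        .msg_type = 0xff,
        .teid = RTE_BE32(UINT32_MAX),
};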
2377
2378 /**
2379  * Validate GTP PSC item.
2380  *
2381  * @param[in] item
2382  *   Item specification.
2383  * @param[in] last_item
2384  *   Previous validated item in the pattern items.
2385  * @param[in] gtp_item
2386  *   Previous GTP item specification.
2387  * @param[in] attr
2388  *   Pointer to flow attributes.
2389  * @param[out] error
2390  *   Pointer to error structure.
2391  *
2392  * @return
2393  *   0 on success, a negative errno value otherwise and rte_errno is set.
2394  */
2395 static int
2396 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2397                               uint64_t last_item,
2398                               const struct rte_flow_item *gtp_item,
2399                               const struct rte_flow_attr *attr,
2400                               struct rte_flow_error *error)
2401 {
2402         const struct rte_flow_item_gtp *gtp_spec;
2403         const struct rte_flow_item_gtp *gtp_mask;
2404         const struct rte_flow_item_gtp_psc *spec;
2405         const struct rte_flow_item_gtp_psc *mask;
2406         const struct rte_flow_item_gtp_psc nic_mask = {
2407                 .pdu_type = 0xFF,
2408                 .qfi = 0xFF,
2409         };
2410
2411         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2412                 return rte_flow_error_set
2413                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2414                          "GTP PSC item must be preceded with GTP item");
2415         gtp_spec = gtp_item->spec;
2416         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2417         /* GTP spec is present and the E flag is requested to match zero. */
2418         if (gtp_spec &&
2419                 (gtp_mask->v_pt_rsv_flags &
2420                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2421                 return rte_flow_error_set
2422                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2423                          "GTP E flag must be 1 to match GTP PSC");
2424         /* Check the flow is not created in group zero. */
2425         if (!attr->transfer && !attr->group)
2426                 return rte_flow_error_set
2427                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2428                          "GTP PSC is not supported for group 0");
2429         /* If no PSC spec is provided, there is nothing more to validate. */
2430         if (!item->spec)
2431                 return 0;
2432         spec = item->spec;
2433         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2434         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2435                 return rte_flow_error_set
2436                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2437                          "PDU type should be smaller than 16");
2438         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2439                                          (const uint8_t *)&nic_mask,
2440                                          sizeof(struct rte_flow_item_gtp_psc),
2441                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2442 }
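
/*
 * Illustrative sketch, not part of the driver: a GTP PSC pattern item as
 * accepted above. The preceding GTP item must match the E flag set to 1
 * and the flow must not be created in group 0. Names are hypothetical:
 */
static const struct rte_flow_item_gtp_psc mlx5_gtp_psc_spec_sketch __rte_unused = {
        .pdu_type = 1, /* Uplink PDU session information. */
        .qfi = 9, /* QoS flow identifier. */
};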
2443
2444 /**
2445  * Validate IPV4 item.
2446  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2447  * Use the existing validation function mlx5_flow_validate_item_ipv4()
2448  * and add specific validation of the fragment_offset field.
2449  * @param[in] item
2450  *   Item specification.
2451  * @param[in] item_flags
2452  *   Bit-fields that holds the items detected until now.
2453  * @param[out] error
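 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).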
2454  *   Pointer to error structure.
2455  *
2456  * @return
2457  *   0 on success, a negative errno value otherwise and rte_errno is set.
2458  */
2459 static int
2460 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2461                            uint64_t item_flags,
2462                            uint64_t last_item,
2463                            uint16_t ether_type,
2464                            struct rte_flow_error *error)
2465 {
2466         int ret;
2467         const struct rte_flow_item_ipv4 *spec = item->spec;
2468         const struct rte_flow_item_ipv4 *last = item->last;
2469         const struct rte_flow_item_ipv4 *mask = item->mask;
2470         rte_be16_t fragment_offset_spec = 0;
2471         rte_be16_t fragment_offset_last = 0;
2472         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2473                 .hdr = {
2474                         .src_addr = RTE_BE32(0xffffffff),
2475                         .dst_addr = RTE_BE32(0xffffffff),
2476                         .type_of_service = 0xff,
2477                         .fragment_offset = RTE_BE16(0xffff),
2478                         .next_proto_id = 0xff,
2479                         .time_to_live = 0xff,
2480                 },
2481         };
2482
2483         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2484                                            ether_type, &nic_ipv4_mask,
2485                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2486         if (ret < 0)
2487                 return ret;
2488         if (spec && mask)
2489                 fragment_offset_spec = spec->hdr.fragment_offset &
2490                                        mask->hdr.fragment_offset;
2491         if (!fragment_offset_spec)
2492                 return 0;
2493         /*
2494          * spec and mask are valid, enforce using full mask to make sure the
2495          * complete value is used correctly.
2496          */
2497         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2498                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2499                 return rte_flow_error_set(error, EINVAL,
2500                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2501                                           item, "must use full mask for"
2502                                           " fragment_offset");
2503         /*
2504          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2505          * indicating this is the 1st fragment of a fragmented packet.
2506          * This is not yet supported in MLX5, return appropriate error message.
2507          */
2508         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2509                 return rte_flow_error_set(error, ENOTSUP,
2510                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2511                                           "match on first fragment not "
2512                                           "supported");
2513         if (fragment_offset_spec && !last)
2514                 return rte_flow_error_set(error, ENOTSUP,
2515                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2516                                           "specified value not supported");
2517         /* spec and last are valid, validate the specified range. */
2518         fragment_offset_last = last->hdr.fragment_offset &
2519                                mask->hdr.fragment_offset;
2520         /*
2521          * Match on fragment_offset spec 0x2001 and last 0x3fff
2522          * means MF is 1 and frag-offset is > 0.
2523          * This packet is the 2nd fragment or later, excluding the last.
2524          * This is not yet supported in MLX5, return appropriate
2525          * error message.
2526          */
2527         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2528             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2529                 return rte_flow_error_set(error, ENOTSUP,
2530                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2531                                           last, "match on following "
2532                                           "fragments not supported");
2533         /*
2534          * Match on fragment_offset spec 0x0001 and last 0x1fff
2535          * means MF is 0 and frag-offset is > 0.
2536          * This packet is the last fragment of a fragmented packet.
2537          * This is not yet supported in MLX5, return appropriate
2538          * error message.
2539          */
2540         if (fragment_offset_spec == RTE_BE16(1) &&
2541             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2542                 return rte_flow_error_set(error, ENOTSUP,
2543                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2544                                           last, "match on last "
2545                                           "fragment not supported");
2546         /*
2547          * Match on fragment_offset spec 0x0001 and last 0x3fff
2548          * means MF and/or frag-offset is not 0.
2549          * This is a fragmented packet.
2550          * Other range values are invalid and rejected.
2551          */
2552         if (!(fragment_offset_spec == RTE_BE16(1) &&
2553               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2554                 return rte_flow_error_set(error, ENOTSUP,
2555                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2556                                           "specified range not supported");
2557         return 0;
2558 }
2559
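/*
 * Usage sketch (illustrative only, not driver code): the single
 * spec/last/mask range accepted above, matching "any fragment" (MF set
 * and/or fragment offset non-zero): spec 0x0001, last 0x3fff, full
 * 0x3fff mask on fragment_offset.
 *
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.fragment_offset = RTE_BE16(1),
 *	};
 *	struct rte_flow_item_ipv4 ipv4_last = {
 *		.hdr.fragment_offset = RTE_BE16(0x3fff),
 *	};
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.fragment_offset = RTE_BE16(0x3fff),
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		.spec = &ipv4_spec,
 *		.last = &ipv4_last,
 *		.mask = &ipv4_mask,
 *	};
 */
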
2560 /**
2561  * Validate IPV6 fragment extension item.
2562  *
2563  * @param[in] item
2564  *   Item specification.
2565  * @param[in] item_flags
2566  *   Bit-fields that holds the items detected until now.
2567  * @param[out] error
2568  *   Pointer to error structure.
2569  *
2570  * @return
2571  *   0 on success, a negative errno value otherwise and rte_errno is set.
2572  */
2573 static int
2574 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2575                                     uint64_t item_flags,
2576                                     struct rte_flow_error *error)
2577 {
2578         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2579         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2580         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2581         rte_be16_t frag_data_spec = 0;
2582         rte_be16_t frag_data_last = 0;
2583         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2584         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2585                                       MLX5_FLOW_LAYER_OUTER_L4;
2586         int ret = 0;
2587         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2588                 .hdr = {
2589                         .next_header = 0xff,
2590                         .frag_data = RTE_BE16(0xffff),
2591                 },
2592         };
2593
2594         if (item_flags & l4m)
2595                 return rte_flow_error_set(error, EINVAL,
2596                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2597                                           "ipv6 fragment extension item cannot "
2598                                           "follow L4 item.");
2599         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2600             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2601                 return rte_flow_error_set(error, EINVAL,
2602                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2603                                           "ipv6 fragment extension item must "
2604                                           "follow ipv6 item");
2605         if (spec && mask)
2606                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2607         if (!frag_data_spec)
2608                 return 0;
2609         /*
2610          * spec and mask are valid, enforce using full mask to make sure the
2611          * complete value is used correctly.
2612          */
2613         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2614                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2615                 return rte_flow_error_set(error, EINVAL,
2616                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2617                                           item, "must use full mask for"
2618                                           " frag_data");
2619         /*
2620          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2621          * This is the 1st fragment of a fragmented packet.
2622          */
2623         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2624                 return rte_flow_error_set(error, ENOTSUP,
2625                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2626                                           "match on first fragment not "
2627                                           "supported");
2628         if (frag_data_spec && !last)
2629                 return rte_flow_error_set(error, EINVAL,
2630                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2631                                           "specified value not supported");
2632         ret = mlx5_flow_item_acceptable
2633                                 (item, (const uint8_t *)mask,
2634                                  (const uint8_t *)&nic_mask,
2635                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2636                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2637         if (ret)
2638                 return ret;
2639         /* spec and last are valid, validate the specified range. */
2640         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2641         /*
2642          * Match on frag_data spec 0x0009 and last 0xfff9
2643          * means M is 1 and frag-offset is > 0.
2644          * This packet is the 2nd fragment or later, excluding the last.
2645          * This is not yet supported in MLX5, return appropriate
2646          * error message.
2647          */
2648         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2649                                        RTE_IPV6_EHDR_MF_MASK) &&
2650             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2651                 return rte_flow_error_set(error, ENOTSUP,
2652                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2653                                           last, "match on following "
2654                                           "fragments not supported");
2655         /*
2656          * Match on frag_data spec 0x0008 and last 0xfff8
2657          * means M is 0 and frag-offset is > 0.
2658          * This packet is the last fragment of a fragmented packet.
2659          * This is not yet supported in MLX5, return appropriate
2660          * error message.
2661          */
2662         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2663             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2664                 return rte_flow_error_set(error, ENOTSUP,
2665                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2666                                           last, "match on last "
2667                                           "fragment not supported");
2668         /* Other range values are invalid and rejected. */
2669         return rte_flow_error_set(error, EINVAL,
2670                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2671                                   "specified range not supported");
2672 }
2673
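/*
 * Usage sketch (illustrative only, not driver code): because every
 * frag_data range other than the rejected ones above ends in an error,
 * the simplest accepted form matches only the presence of the IPv6
 * fragment extension header, taking the early-success path (no frag_data
 * constraint in the spec).
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
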
2674 /**
2675  * Validate ASO CT item.
2676  *
2677  * @param[in] dev
2678  *   Pointer to the rte_eth_dev structure.
2679  * @param[in] item
2680  *   Item specification.
2681  * @param[in] item_flags
2682  *   Pointer to bit-fields that holds the items detected until now.
2683  * @param[out] error
2684  *   Pointer to error structure.
2685  *
2686  * @return
2687  *   0 on success, a negative errno value otherwise and rte_errno is set.
2688  */
2689 static int
2690 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2691                              const struct rte_flow_item *item,
2692                              uint64_t *item_flags,
2693                              struct rte_flow_error *error)
2694 {
2695         const struct rte_flow_item_conntrack *spec = item->spec;
2696         const struct rte_flow_item_conntrack *mask = item->mask;
2697         RTE_SET_USED(dev);
2698         uint32_t flags;
2699
2700         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2701                 return rte_flow_error_set(error, EINVAL,
2702                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2703                                           "Only one CT is supported");
2704         if (!mask)
2705                 mask = &rte_flow_item_conntrack_mask;
             /* The CT state bits are not constrained when no spec is given. */
             if (!spec) {
                     *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
                     return 0;
             }
2706         flags = spec->flags & mask->flags;
2707         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2708             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2709              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2710              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2711                 return rte_flow_error_set(error, EINVAL,
2712                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2713                                           "Conflict status bits");
2714         /* State change also needs to be considered. */
2715         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2716         return 0;
2717 }
2718
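/*
 * Usage sketch (illustrative only, not driver code): a conntrack item
 * that passes the conflict check above by requesting only the VALID
 * packet-state bit.
 *
 *	struct rte_flow_item_conntrack ct_spec = {
 *		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
 *		.spec = &ct_spec,
 *	};
 */
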
2719 /**
2720  * Validate the pop VLAN action.
2721  *
2722  * @param[in] dev
2723  *   Pointer to the rte_eth_dev structure.
2724  * @param[in] action_flags
2725  *   Holds the actions detected until now.
2726  * @param[in] action
2727  *   Pointer to the pop vlan action.
2728  * @param[in] item_flags
2729  *   The items found in this flow rule.
2730  * @param[in] attr
2731  *   Pointer to flow attributes.
2732  * @param[out] error
2733  *   Pointer to error structure.
2734  *
2735  * @return
2736  *   0 on success, a negative errno value otherwise and rte_errno is set.
2737  */
2738 static int
2739 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2740                                  uint64_t action_flags,
2741                                  const struct rte_flow_action *action,
2742                                  uint64_t item_flags,
2743                                  const struct rte_flow_attr *attr,
2744                                  struct rte_flow_error *error)
2745 {
2746         const struct mlx5_priv *priv = dev->data->dev_private;
2747
2750         if (!priv->sh->pop_vlan_action)
2751                 return rte_flow_error_set(error, ENOTSUP,
2752                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2753                                           NULL,
2754                                           "pop vlan action is not supported");
2755         if (attr->egress)
2756                 return rte_flow_error_set(error, ENOTSUP,
2757                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2758                                           NULL,
2759                                           "pop vlan action not supported for "
2760                                           "egress");
2761         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2762                 return rte_flow_error_set(error, ENOTSUP,
2763                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2764                                           "no support for multiple VLAN "
2765                                           "actions");
2766         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2767         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2768             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2769                 return rte_flow_error_set(error, ENOTSUP,
2770                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2771                                           NULL,
2772                                           "cannot pop vlan after decap without "
2773                                           "match on inner vlan in the flow");
2774         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2775         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2776             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2777                 return rte_flow_error_set(error, ENOTSUP,
2778                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2779                                           NULL,
2780                                           "cannot pop vlan without a "
2781                                           "match on (outer) vlan in the flow");
2782         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2783                 return rte_flow_error_set(error, EINVAL,
2784                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2785                                           "wrong action order, port_id should "
2786                                           "be after pop VLAN action");
2787         if (!attr->transfer && priv->representor)
2788                 return rte_flow_error_set(error, ENOTSUP,
2789                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2790                                           "pop vlan action for VF representor "
2791                                           "not supported on NIC table");
2792         return 0;
2793 }
2794
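/*
 * Usage sketch (illustrative only, not driver code): a rule satisfying
 * the pop VLAN constraints above - the pattern matches an (outer) VLAN
 * header and the fate action comes after OF_POP_VLAN. The queue index is
 * hypothetical.
 *
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
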
2795 /**
2796  * Get VLAN default info from vlan match info.
2797  *
2798  * @param[in] items
2799  *   The list of item specifications.
2800  * @param[out] vlan
2801  *   Pointer to the VLAN info to fill.
2802  */
2806 static void
2807 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2808                                   struct rte_vlan_hdr *vlan)
2809 {
2810         const struct rte_flow_item_vlan nic_mask = {
2811                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2812                                 MLX5DV_FLOW_VLAN_VID_MASK),
2813                 .inner_type = RTE_BE16(0xffff),
2814         };
2815
2816         if (items == NULL)
2817                 return;
2818         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2819                 int type = items->type;
2820
2821                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2822                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2823                         break;
2824         }
2825         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2826                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2827                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2828
2829                 /* If VLAN item in pattern doesn't contain data, return here. */
2830                 if (!vlan_v)
2831                         return;
2832                 if (!vlan_m)
2833                         vlan_m = &nic_mask;
2834                 /* Only full match values are accepted */
2835                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2836                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2837                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2838                         vlan->vlan_tci |=
2839                                 rte_be_to_cpu_16(vlan_v->tci &
2840                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2841                 }
2842                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2843                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2844                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2845                         vlan->vlan_tci |=
2846                                 rte_be_to_cpu_16(vlan_v->tci &
2847                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2848                 }
2849                 if (vlan_m->inner_type == nic_mask.inner_type)
2850                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2851                                                            vlan_m->inner_type);
2852         }
2853 }
2854
2855 /**
2856  * Validate the push VLAN action.
2857  *
2858  * @param[in] dev
2859  *   Pointer to the rte_eth_dev structure.
2860  * @param[in] action_flags
2861  *   Holds the actions detected until now.
2862  * @param[in] vlan_m
2863  *   VLAN item mask from the flow pattern, or NULL if not present.
2864  * @param[in] action
2865  *   Pointer to the action structure.
2866  * @param[in] attr
2867  *   Pointer to flow attributes
2868  * @param[out] error
2869  *   Pointer to error structure.
2870  *
2871  * @return
2872  *   0 on success, a negative errno value otherwise and rte_errno is set.
2873  */
2874 static int
2875 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2876                                   uint64_t action_flags,
2877                                   const struct rte_flow_item_vlan *vlan_m,
2878                                   const struct rte_flow_action *action,
2879                                   const struct rte_flow_attr *attr,
2880                                   struct rte_flow_error *error)
2881 {
2882         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2883         const struct mlx5_priv *priv = dev->data->dev_private;
2884
2885         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2886             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2887                 return rte_flow_error_set(error, EINVAL,
2888                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2889                                           "invalid vlan ethertype");
2890         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2891                 return rte_flow_error_set(error, EINVAL,
2892                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2893                                           "wrong action order, port_id should "
2894                                           "be after push VLAN");
2895         if (!attr->transfer && priv->representor)
2896                 return rte_flow_error_set(error, ENOTSUP,
2897                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2898                                           "push vlan action for VF representor "
2899                                           "not supported on NIC table");
2900         if (vlan_m &&
2901             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2902             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2903                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2904             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2905             !(mlx5_flow_find_action
2906                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2907                 return rte_flow_error_set(error, EINVAL,
2908                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2909                                           "not full match mask on VLAN PCP and "
2910                                           "there is no of_set_vlan_pcp action, "
2911                                           "push VLAN action cannot figure out "
2912                                           "PCP value");
2913         if (vlan_m &&
2914             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2915             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2916                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2917             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2918             !(mlx5_flow_find_action
2919                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2920                 return rte_flow_error_set(error, EINVAL,
2921                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2922                                           "not full match mask on VLAN VID and "
2923                                           "there is no of_set_vlan_vid action, "
2924                                           "push VLAN action cannot figure out "
2925                                           "VID value");
2927         return 0;
2928 }
2929
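/*
 * Usage sketch (illustrative only, not driver code): push VLAN followed
 * by explicit VID and PCP setting, which satisfies the checks above even
 * when the pattern carries no full VLAN TCI match. VID/PCP values are
 * hypothetical.
 *
 *	struct rte_flow_action_of_push_vlan push = {
 *		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *	};
 *	struct rte_flow_action_of_set_vlan_vid vid = {
 *		.vlan_vid = RTE_BE16(100),
 *	};
 *	struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
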
2930 /**
2931  * Validate the set VLAN PCP.
2932  *
2933  * @param[in] action_flags
2934  *   Holds the actions detected until now.
2935  * @param[in] actions
2936  *   Pointer to the list of actions remaining in the flow rule.
2937  * @param[out] error
2938  *   Pointer to error structure.
2939  *
2940  * @return
2941  *   0 on success, a negative errno value otherwise and rte_errno is set.
2942  */
2943 static int
2944 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2945                                      const struct rte_flow_action actions[],
2946                                      struct rte_flow_error *error)
2947 {
2948         const struct rte_flow_action *action = actions;
2949         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2950
2951         if (conf->vlan_pcp > 7)
2952                 return rte_flow_error_set(error, EINVAL,
2953                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2954                                           "VLAN PCP value is too big");
2955         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2956                 return rte_flow_error_set(error, ENOTSUP,
2957                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2958                                           "set VLAN PCP action must follow "
2959                                           "the push VLAN action");
2960         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2961                 return rte_flow_error_set(error, ENOTSUP,
2962                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2963                                           "Multiple VLAN PCP modifications are "
2964                                           "not supported");
2965         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2966                 return rte_flow_error_set(error, EINVAL,
2967                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2968                                           "wrong action order, port_id should "
2969                                           "be after set VLAN PCP");
2970         return 0;
2971 }
2972
2973 /**
2974  * Validate the set VLAN VID.
2975  *
2976  * @param[in] item_flags
2977  *   Holds the items detected in this rule.
2978  * @param[in] action_flags
2979  *   Holds the actions detected until now.
2980  * @param[in] actions
2981  *   Pointer to the list of actions remaining in the flow rule.
2982  * @param[out] error
2983  *   Pointer to error structure.
2984  *
2985  * @return
2986  *   0 on success, a negative errno value otherwise and rte_errno is set.
2987  */
2988 static int
2989 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2990                                      uint64_t action_flags,
2991                                      const struct rte_flow_action actions[],
2992                                      struct rte_flow_error *error)
2993 {
2994         const struct rte_flow_action *action = actions;
2995         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2996
2997         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2998                 return rte_flow_error_set(error, EINVAL,
2999                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3000                                           "VLAN VID value is too big");
3001         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3002             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3003                 return rte_flow_error_set(error, ENOTSUP,
3004                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3005                                           "set VLAN VID action must follow push"
3006                                           " VLAN action or match on VLAN item");
3007         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3008                 return rte_flow_error_set(error, ENOTSUP,
3009                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3010                                           "Multiple VLAN VID modifications are "
3011                                           "not supported");
3012         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3013                 return rte_flow_error_set(error, EINVAL,
3014                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3015                                           "wrong action order, port_id should "
3016                                           "be after set VLAN VID");
3017         return 0;
3018 }
3019
3020 /**
3021  * Validate the FLAG action.
3022  *
3023  * @param[in] dev
3024  *   Pointer to the rte_eth_dev structure.
3025  * @param[in] action_flags
3026  *   Holds the actions detected until now.
3027  * @param[in] attr
3028  *   Pointer to flow attributes
3029  * @param[out] error
3030  *   Pointer to error structure.
3031  *
3032  * @return
3033  *   0 on success, a negative errno value otherwise and rte_errno is set.
3034  */
3035 static int
3036 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3037                              uint64_t action_flags,
3038                              const struct rte_flow_attr *attr,
3039                              struct rte_flow_error *error)
3040 {
3041         struct mlx5_priv *priv = dev->data->dev_private;
3042         struct mlx5_dev_config *config = &priv->config;
3043         int ret;
3044
3045         /* Fall back if no extended metadata register support. */
3046         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3047                 return mlx5_flow_validate_action_flag(action_flags, attr,
3048                                                       error);
3049         /* Extensive metadata mode requires registers. */
3050         if (!mlx5_flow_ext_mreg_supported(dev))
3051                 return rte_flow_error_set(error, ENOTSUP,
3052                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3053                                           "no metadata registers "
3054                                           "to support flag action");
3055         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3056                 return rte_flow_error_set(error, ENOTSUP,
3057                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3058                                           "extended metadata register"
3059                                           " isn't available");
3060         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3061         if (ret < 0)
3062                 return ret;
3063         MLX5_ASSERT(ret > 0);
3064         if (action_flags & MLX5_FLOW_ACTION_MARK)
3065                 return rte_flow_error_set(error, EINVAL,
3066                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3067                                           "can't mark and flag in same flow");
3068         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3069                 return rte_flow_error_set(error, EINVAL,
3070                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3071                                           "can't have 2 flag"
3072                                           " actions in same flow");
3073         return 0;
3074 }
3075
3076 /**
3077  * Validate MARK action.
3078  *
3079  * @param[in] dev
3080  *   Pointer to the rte_eth_dev structure.
3081  * @param[in] action
3082  *   Pointer to action.
3083  * @param[in] action_flags
3084  *   Holds the actions detected until now.
3085  * @param[in] attr
3086  *   Pointer to flow attributes
3087  * @param[out] error
3088  *   Pointer to error structure.
3089  *
3090  * @return
3091  *   0 on success, a negative errno value otherwise and rte_errno is set.
3092  */
3093 static int
3094 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3095                              const struct rte_flow_action *action,
3096                              uint64_t action_flags,
3097                              const struct rte_flow_attr *attr,
3098                              struct rte_flow_error *error)
3099 {
3100         struct mlx5_priv *priv = dev->data->dev_private;
3101         struct mlx5_dev_config *config = &priv->config;
3102         const struct rte_flow_action_mark *mark = action->conf;
3103         int ret;
3104
3105         if (is_tunnel_offload_active(dev))
3106                 return rte_flow_error_set(error, ENOTSUP,
3107                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3108                                           "no mark action "
3109                                           "if tunnel offload active");
3110         /* Fall back if no extended metadata register support. */
3111         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3112                 return mlx5_flow_validate_action_mark(action, action_flags,
3113                                                       attr, error);
3114         /* Extensive metadata mode requires registers. */
3115         if (!mlx5_flow_ext_mreg_supported(dev))
3116                 return rte_flow_error_set(error, ENOTSUP,
3117                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3118                                           "no metadata registers "
3119                                           "to support mark action");
3120         if (!priv->sh->dv_mark_mask)
3121                 return rte_flow_error_set(error, ENOTSUP,
3122                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3123                                           "extended metadata register"
3124                                           " isn't available");
3125         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3126         if (ret < 0)
3127                 return ret;
3128         MLX5_ASSERT(ret > 0);
3129         if (!mark)
3130                 return rte_flow_error_set(error, EINVAL,
3131                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3132                                           "configuration cannot be null");
3133         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3134                 return rte_flow_error_set(error, EINVAL,
3135                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3136                                           &mark->id,
3137                                           "mark id exceeds the limit");
3138         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3139                 return rte_flow_error_set(error, EINVAL,
3140                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3141                                           "can't flag and mark in same flow");
3142         if (action_flags & MLX5_FLOW_ACTION_MARK)
3143                 return rte_flow_error_set(error, EINVAL,
3144                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3145                                           "can't have 2 mark actions in same"
3146                                           " flow");
3147         return 0;
3148 }
3149
3150 /**
3151  * Validate SET_META action.
3152  *
3153  * @param[in] dev
3154  *   Pointer to the rte_eth_dev structure.
3155  * @param[in] action
3156  *   Pointer to the action structure.
3157  * @param[in] action_flags
3158  *   Holds the actions detected until now.
3159  * @param[in] attr
3160  *   Pointer to flow attributes
3161  * @param[out] error
3162  *   Pointer to error structure.
3163  *
3164  * @return
3165  *   0 on success, a negative errno value otherwise and rte_errno is set.
3166  */
3167 static int
3168 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3169                                  const struct rte_flow_action *action,
3170                                  uint64_t action_flags __rte_unused,
3171                                  const struct rte_flow_attr *attr,
3172                                  struct rte_flow_error *error)
3173 {
3174         const struct rte_flow_action_set_meta *conf;
3175         uint32_t nic_mask = UINT32_MAX;
3176         int reg;
3177
3178         if (!mlx5_flow_ext_mreg_supported(dev))
3179                 return rte_flow_error_set(error, ENOTSUP,
3180                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3181                                           "extended metadata register"
3182                                           " isn't supported");
3183         reg = flow_dv_get_metadata_reg(dev, attr, error);
3184         if (reg < 0)
3185                 return reg;
3186         if (reg == REG_NON)
3187                 return rte_flow_error_set(error, ENOTSUP,
3188                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3189                                           "unavailable extended metadata register");
3190         if (reg != REG_A && reg != REG_B) {
3191                 struct mlx5_priv *priv = dev->data->dev_private;
3192
3193                 nic_mask = priv->sh->dv_meta_mask;
3194         }
3195         if (!(action->conf))
3196                 return rte_flow_error_set(error, EINVAL,
3197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3198                                           "configuration cannot be null");
3199         conf = (const struct rte_flow_action_set_meta *)action->conf;
3200         if (!conf->mask)
3201                 return rte_flow_error_set(error, EINVAL,
3202                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3203                                           "zero mask doesn't have any effect");
3204         if (conf->mask & ~nic_mask)
3205                 return rte_flow_error_set(error, EINVAL,
3206                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3207                                           "metadata must be within reg C0");
3208         return 0;
3209 }
3210
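/*
 * Usage sketch (illustrative only, not driver code): a SET_META action
 * with the non-zero mask required above. Data and mask values are
 * hypothetical; the usable mask width depends on the metadata register
 * selected by flow_dv_get_metadata_reg().
 *
 *	struct rte_flow_action_set_meta meta = {
 *		.data = 0x1234,
 *		.mask = 0xffff,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_META,
 *		.conf = &meta,
 *	};
 */
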
3211 /**
3212  * Validate SET_TAG action.
3213  *
3214  * @param[in] dev
3215  *   Pointer to the rte_eth_dev structure.
3216  * @param[in] action
3217  *   Pointer to the action structure.
3218  * @param[in] action_flags
3219  *   Holds the actions detected until now.
3220  * @param[in] attr
3221  *   Pointer to flow attributes
3222  * @param[out] error
3223  *   Pointer to error structure.
3224  *
3225  * @return
3226  *   0 on success, a negative errno value otherwise and rte_errno is set.
3227  */
3228 static int
3229 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3230                                 const struct rte_flow_action *action,
3231                                 uint64_t action_flags,
3232                                 const struct rte_flow_attr *attr,
3233                                 struct rte_flow_error *error)
3234 {
3235         const struct rte_flow_action_set_tag *conf;
3236         const uint64_t terminal_action_flags =
3237                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3238                 MLX5_FLOW_ACTION_RSS;
3239         int ret;
3240
3241         if (!mlx5_flow_ext_mreg_supported(dev))
3242                 return rte_flow_error_set(error, ENOTSUP,
3243                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3244                                           "extensive metadata register"
3245                                           " isn't supported");
3246         if (!(action->conf))
3247                 return rte_flow_error_set(error, EINVAL,
3248                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3249                                           "configuration cannot be null");
3250         conf = (const struct rte_flow_action_set_tag *)action->conf;
3251         if (!conf->mask)
3252                 return rte_flow_error_set(error, EINVAL,
3253                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3254                                           "zero mask doesn't have any effect");
3255         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3256         if (ret < 0)
3257                 return ret;
3258         if (!attr->transfer && attr->ingress &&
3259             (action_flags & terminal_action_flags))
3260                 return rte_flow_error_set(error, EINVAL,
3261                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3262                                           "set_tag has no effect"
3263                                           " with terminal actions");
3264         return 0;
3265 }
3266
3267 /**
3268  * Check if action counter is shared by either old or new mechanism.
3269  *
3270  * @param[in] action
3271  *   Pointer to the action structure.
3272  *
3273  * @return
3274  *   True when counter is shared, false otherwise.
3275  */
3276 static inline bool
3277 is_shared_action_count(const struct rte_flow_action *action)
3278 {
3279         const struct rte_flow_action_count *count =
3280                         (const struct rte_flow_action_count *)action->conf;
3281
3282         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3283                 return true;
3284         return !!(count && count->shared);
3285 }
3286
3287 /**
3288  * Validate count action.
3289  *
3290  * @param[in] dev
3291  *   Pointer to rte_eth_dev structure.
3292  * @param[in] shared
3293  *   Indicator if action is shared.
3294  * @param[in] action_flags
3295  *   Holds the actions detected until now.
3296  * @param[out] error
3297  *   Pointer to error structure.
3298  *
3299  * @return
3300  *   0 on success, a negative errno value otherwise and rte_errno is set.
3301  */
3302 static int
3303 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3304                               uint64_t action_flags,
3305                               struct rte_flow_error *error)
3306 {
3307         struct mlx5_priv *priv = dev->data->dev_private;
3308
3309         if (!priv->config.devx)
3310                 goto notsup_err;
3311         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3312                 return rte_flow_error_set(error, EINVAL,
3313                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3314                                           "duplicate count actions set");
3315         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3316             !priv->sh->flow_hit_aso_en)
3317                 return rte_flow_error_set(error, EINVAL,
3318                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3319                                           "old age and shared count combination is not supported");
3320 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3321         return 0;
3322 #endif
3323 notsup_err:
3324         return rte_flow_error_set
3325                       (error, ENOTSUP,
3326                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3327                        NULL,
3328                        "count action not supported");
3329 }
3330
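/*
 * Usage sketch (illustrative only, not driver code): a shared count
 * action as detected by is_shared_action_count() and validated above.
 * The id is hypothetical; counters require DevX support.
 *
 *	struct rte_flow_action_count count = {
 *		.shared = 1,
 *		.id = 42,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		.conf = &count,
 *	};
 */
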
3331 /**
3332  * Validate the L2 encap action.
3333  *
3334  * @param[in] dev
3335  *   Pointer to the rte_eth_dev structure.
3336  * @param[in] action_flags
3337  *   Holds the actions detected until now.
3338  * @param[in] action
3339  *   Pointer to the action structure.
3340  * @param[in] attr
3341  *   Pointer to flow attributes.
3342  * @param[out] error
3343  *   Pointer to error structure.
3344  *
3345  * @return
3346  *   0 on success, a negative errno value otherwise and rte_errno is set.
3347  */
3348 static int
3349 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3350                                  uint64_t action_flags,
3351                                  const struct rte_flow_action *action,
3352                                  const struct rte_flow_attr *attr,
3353                                  struct rte_flow_error *error)
3354 {
3355         const struct mlx5_priv *priv = dev->data->dev_private;
3356
3357         if (!(action->conf))
3358                 return rte_flow_error_set(error, EINVAL,
3359                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3360                                           "configuration cannot be null");
3361         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3362                 return rte_flow_error_set(error, EINVAL,
3363                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3364                                           "can only have a single encap action "
3365                                           "in a flow");
3366         if (!attr->transfer && priv->representor)
3367                 return rte_flow_error_set(error, ENOTSUP,
3368                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3369                                           "encap action for VF representor "
3370                                           "not supported on NIC table");
3371         return 0;
3372 }
3373
3374 /**
3375  * Validate a decap action.
3376  *
3377  * @param[in] dev
3378  *   Pointer to the rte_eth_dev structure.
3379  * @param[in] action_flags
3380  *   Holds the actions detected until now.
3381  * @param[in] action
3382  *   Pointer to the action structure.
3383  * @param[in] item_flags
3384  *   Holds the items detected.
3385  * @param[in] attr
3386  *   Pointer to flow attributes
3387  * @param[out] error
3388  *   Pointer to error structure.
3389  *
3390  * @return
3391  *   0 on success, a negative errno value otherwise and rte_errno is set.
3392  */
3393 static int
3394 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3395                               uint64_t action_flags,
3396                               const struct rte_flow_action *action,
3397                               const uint64_t item_flags,
3398                               const struct rte_flow_attr *attr,
3399                               struct rte_flow_error *error)
3400 {
3401         const struct mlx5_priv *priv = dev->data->dev_private;
3402
3403         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3404             !priv->config.decap_en)
3405                 return rte_flow_error_set(error, ENOTSUP,
3406                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3407                                           "decap is not enabled");
3408         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3409                 return rte_flow_error_set(error, ENOTSUP,
3410                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3411                                           action_flags &
3412                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3413                                           "have a single decap action" : "decap "
3414                                           "after encap is not supported");
3415         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3416                 return rte_flow_error_set(error, EINVAL,
3417                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3418                                           "can't have decap action after"
3419                                           " modify action");
3420         if (attr->egress)
3421                 return rte_flow_error_set(error, ENOTSUP,
3422                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3423                                           NULL,
3424                                           "decap action not supported for "
3425                                           "egress");
3426         if (!attr->transfer && priv->representor)
3427                 return rte_flow_error_set(error, ENOTSUP,
3428                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3429                                           "decap action for VF representor "
3430                                           "not supported on NIC table");
3431         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3432             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3433                 return rte_flow_error_set(error, ENOTSUP,
3434                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3435                                 "VXLAN item should be present for VXLAN decap");
3436         return 0;
3437 }
3438
3439 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3440
3441 /**
3442  * Validate the raw encap and decap actions.
3443  *
3444  * @param[in] dev
3445  *   Pointer to the rte_eth_dev structure.
3446  * @param[in] decap
3447  *   Pointer to the decap action.
3448  * @param[in] encap
3449  *   Pointer to the encap action.
3450  * @param[in] attr
3451  *   Pointer to flow attributes
3452  * @param[in, out] action_flags
3453  *   Holds the actions detected until now.
3454  * @param[out] actions_n
3455  *   Pointer to the number of actions counter.
3456  * @param[in] action
3457  *   Pointer to the action structure.
3458  * @param[in] item_flags
3459  *   Holds the items detected.
3460  * @param[out] error
3461  *   Pointer to error structure.
3462  *
3463  * @return
3464  *   0 on success, a negative errno value otherwise and rte_errno is set.
3465  */
3466 static int
3467 flow_dv_validate_action_raw_encap_decap
3468         (struct rte_eth_dev *dev,
3469          const struct rte_flow_action_raw_decap *decap,
3470          const struct rte_flow_action_raw_encap *encap,
3471          const struct rte_flow_attr *attr, uint64_t *action_flags,
3472          int *actions_n, const struct rte_flow_action *action,
3473          uint64_t item_flags, struct rte_flow_error *error)
3474 {
3475         const struct mlx5_priv *priv = dev->data->dev_private;
3476         int ret;
3477
3478         if (encap && (!encap->size || !encap->data))
3479                 return rte_flow_error_set(error, EINVAL,
3480                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3481                                           "raw encap data cannot be empty");
3482         if (decap && encap) {
3483                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3484                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3485                         /* L3 encap. */
3486                         decap = NULL;
3487                 else if (encap->size <=
3488                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3489                            decap->size >
3490                            MLX5_ENCAPSULATION_DECISION_SIZE)
3491                         /* L3 decap. */
3492                         encap = NULL;
3493                 else if (encap->size >
3494                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3495                            decap->size >
3496                            MLX5_ENCAPSULATION_DECISION_SIZE)
3497                         /* 2 L2 actions: encap and decap. */
3498                         ;
3499                 else
3500                         return rte_flow_error_set(error,
3501                                 ENOTSUP,
3502                                 RTE_FLOW_ERROR_TYPE_ACTION,
3503                                 NULL, "unsupported combination of too "
3504                                 "small raw decap and too small raw "
3505                                 "encap");
3506         }
3507         if (decap) {
3508                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3509                                                     item_flags, attr, error);
3510                 if (ret < 0)
3511                         return ret;
3512                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3513                 ++(*actions_n);
3514         }
3515         if (encap) {
3516                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3517                         return rte_flow_error_set(error, ENOTSUP,
3518                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3519                                                   NULL,
3520                                                   "small raw encap size");
3521                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3522                         return rte_flow_error_set(error, EINVAL,
3523                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3524                                                   NULL,
3525                                                   "more than one encap action");
3526                 if (!attr->transfer && priv->representor)
3527                         return rte_flow_error_set
3528                                         (error, ENOTSUP,
3529                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3530                                          "encap action for VF representor "
3531                                          "not supported on NIC table");
3532                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3533                 ++(*actions_n);
3534         }
3535         return 0;
3536 }
3537
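/*
 * Usage sketch (illustrative only, not driver code): the "L3 encap" case
 * above - a raw decap whose size is assumed not to exceed
 * MLX5_ENCAPSULATION_DECISION_SIZE (stripping the inner L2 header)
 * combined with a larger raw encap prepending a full outer stack. The
 * buffer contents and the 64-byte outer size are hypothetical; a real
 * application fills them with the exact header bytes.
 *
 *	uint8_t l2_hdr[RTE_ETHER_HDR_LEN];
 *	uint8_t outer_hdr[64];
 *	struct rte_flow_action_raw_decap decap = {
 *		.data = l2_hdr, .size = sizeof(l2_hdr),
 *	};
 *	struct rte_flow_action_raw_encap encap = {
 *		.data = outer_hdr, .size = sizeof(outer_hdr),
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
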
3538 /**
3539  * Validate the ASO CT action.
3540  *
3541  * @param[in] dev
3542  *   Pointer to the rte_eth_dev structure.
3543  * @param[in] action_flags
3544  *   Holds the actions detected until now.
3545  * @param[in] item_flags
3546  *   The items found in this flow rule.
3547  * @param[in] attr
3548  *   Pointer to flow attributes.
3549  * @param[out] error
3550  *   Pointer to error structure.
3551  *
3552  * @return
3553  *   0 on success, a negative errno value otherwise and rte_errno is set.
3554  */
3555 static int
3556 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3557                                uint64_t action_flags,
3558                                uint64_t item_flags,
3559                                const struct rte_flow_attr *attr,
3560                                struct rte_flow_error *error)
3561 {
3562         RTE_SET_USED(dev);
3563
3564         if (attr->group == 0 && !attr->transfer)
3565                 return rte_flow_error_set(error, ENOTSUP,
3566                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3567                                           NULL,
3568                                           "Only non-root tables are supported");
3569         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3570                 return rte_flow_error_set(error, ENOTSUP,
3571                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3572                                           "CT cannot follow a fate action");
3573         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3574             (action_flags & MLX5_FLOW_ACTION_AGE))
3575                 return rte_flow_error_set(error, EINVAL,
3576                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3577                                           "Only one ASO action is supported");
3578         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3579                 return rte_flow_error_set(error, EINVAL,
3580                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3581                                           "Encap cannot exist before CT");
3582         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3583                 return rte_flow_error_set(error, EINVAL,
3584                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3585                                           "Not an outer TCP packet");
3586         return 0;
3587 }
3588
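/*
 * Usage sketch (illustrative only, not driver code): a rule layout that
 * passes the CT action checks above - non-root group, an outer TCP
 * match, and the CT action placed before the fate (jump) action. The
 * conntrack configuration and jump target group are hypothetical; in
 * practice the CT context usually comes from an indirect action.
 *
 *	struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
 *	struct rte_flow_action_conntrack ct_conf = { .enable = 1 };
 *	struct rte_flow_action_jump jump = { .group = 2 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_CONNTRACK, .conf = &ct_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
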
3589 /**
3590  * Match encap_decap resource.
3591  *
3592  * @param list
3593  *   Pointer to the hash list.
3594  * @param entry
3595  *   Pointer to exist resource entry object.
3596  * @param key
3597  *   Key of the new entry.
3598  * @param cb_ctx
3599  *   Pointer to the new encap_decap resource.
3600  *
3601  * @return
3602  *   0 on matching, non-zero otherwise.
3603  */
3604 int
3605 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3606                              struct mlx5_hlist_entry *entry,
3607                              uint64_t key __rte_unused, void *cb_ctx)
3608 {
3609         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3610         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3611         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3612
3613         cache_resource = container_of(entry,
3614                                       struct mlx5_flow_dv_encap_decap_resource,
3615                                       entry);
3616         if (resource->reformat_type == cache_resource->reformat_type &&
3617             resource->ft_type == cache_resource->ft_type &&
3618             resource->flags == cache_resource->flags &&
3619             resource->size == cache_resource->size &&
3620             !memcmp((const void *)resource->buf,
3621                     (const void *)cache_resource->buf,
3622                     resource->size))
3623                 return 0;
3624         return -1;
3625 }
3626
3627 /**
3628  * Allocate encap_decap resource.
3629  *
3630  * @param list
3631  *   Pointer to the hash list.
3632  * @param key
3633  *   Key of the new entry.
3634  * @param cb_ctx
3635  *   Pointer to the new encap_decap resource.
3636  *
3637  * @return
3638  *   Pointer to the created entry on success, NULL otherwise.
3639  */
3640 struct mlx5_hlist_entry *
3641 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3642                               uint64_t key __rte_unused,
3643                               void *cb_ctx)
3644 {
3645         struct mlx5_dev_ctx_shared *sh = list->ctx;
3646         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3647         struct mlx5dv_dr_domain *domain;
3648         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3649         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3650         uint32_t idx;
3651         int ret;
3652
3653         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3654                 domain = sh->fdb_domain;
3655         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3656                 domain = sh->rx_domain;
3657         else
3658                 domain = sh->tx_domain;
3659         /* Register new encap/decap resource. */
3660         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3661                                        &idx);
3662         if (!cache_resource) {
3663                 rte_flow_error_set(ctx->error, ENOMEM,
3664                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3665                                    "cannot allocate resource memory");
3666                 return NULL;
3667         }
3668         *cache_resource = *resource;
3669         cache_resource->idx = idx;
3670         ret = mlx5_flow_os_create_flow_action_packet_reformat
3671                                         (sh->ctx, domain, cache_resource,
3672                                          &cache_resource->action);
3673         if (ret) {
3674                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3675                 rte_flow_error_set(ctx->error, ENOMEM,
3676                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3677                                    NULL, "cannot create action");
3678                 return NULL;
3679         }
3680
3681         return &cache_resource->entry;
3682 }
3683
3684 /**
3685  * Find existing encap/decap resource or create and register a new one.
3686  *
3687  * @param[in, out] dev
3688  *   Pointer to rte_eth_dev structure.
3689  * @param[in, out] resource
3690  *   Pointer to encap/decap resource.
3691  * @param[in, out] dev_flow
3692  *   Pointer to the dev_flow.
3693  * @param[out] error
3694  *   Pointer to error structure.
3695  *
3696  * @return
3697  *   0 on success, otherwise a negative errno value and rte_errno is set.
3698  */
3699 static int
3700 flow_dv_encap_decap_resource_register
3701                         (struct rte_eth_dev *dev,
3702                          struct mlx5_flow_dv_encap_decap_resource *resource,
3703                          struct mlx5_flow *dev_flow,
3704                          struct rte_flow_error *error)
3705 {
3706         struct mlx5_priv *priv = dev->data->dev_private;
3707         struct mlx5_dev_ctx_shared *sh = priv->sh;
3708         struct mlx5_hlist_entry *entry;
3709         union {
3710                 struct {
3711                         uint32_t ft_type:8;
3712                         uint32_t refmt_type:8;
3713                         /*
3714                          * Header reformat actions can be shared between
3715                          * non-root tables. One bit to indicate non-root
3716                          * table or not.
3717                          */
3718                         uint32_t is_root:1;
3719                         uint32_t reserve:15;
3720                 };
3721                 uint32_t v32;
3722         } encap_decap_key = {
3723                 {
3724                         .ft_type = resource->ft_type,
3725                         .refmt_type = resource->reformat_type,
3726                         .is_root = !!dev_flow->dv.group,
3727                         .reserve = 0,
3728                 }
3729         };
3730         struct mlx5_flow_cb_ctx ctx = {
3731                 .error = error,
3732                 .data = resource,
3733         };
3734         uint64_t key64;
3735
3736         resource->flags = dev_flow->dv.group ? 0 : 1;
3737         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3738                                  sizeof(encap_decap_key.v32), 0);
3739         if (resource->reformat_type !=
3740             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3741             resource->size)
3742                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3743         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3744         if (!entry)
3745                 return -rte_errno;
3746         resource = container_of(entry, typeof(*resource), entry);
3747         dev_flow->dv.encap_decap = resource;
3748         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3749         return 0;
3750 }
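
/*
 * Illustration only: two flows requesting the same reformat (same ft_type,
 * reformat type, root/non-root placement and raw header bytes) derive the
 * same key64 above and thus share one cached action through
 * mlx5_hlist_register(). The raw buffer is folded into the checksum for
 * every reformat except plain L2 decap, whose key is fully described by
 * encap_decap_key alone.
 */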
3751
3752 /**
3753  * Find existing table jump resource or create and register a new one.
3754  *
3755  * @param[in, out] dev
3756  *   Pointer to rte_eth_dev structure.
3757  * @param[in, out] tbl
3758  *   Pointer to flow table resource.
3759  * @param[in, out] dev_flow
3760  *   Pointer to the dev_flow.
3761  * @param[out] error
3762  *   Pointer to error structure.
3763  *
3764  * @return
3765  *   0 on success, otherwise a negative errno value and rte_errno is set.
3766  */
3767 static int
3768 flow_dv_jump_tbl_resource_register
3769                         (struct rte_eth_dev *dev __rte_unused,
3770                          struct mlx5_flow_tbl_resource *tbl,
3771                          struct mlx5_flow *dev_flow,
3772                          struct rte_flow_error *error __rte_unused)
3773 {
3774         struct mlx5_flow_tbl_data_entry *tbl_data =
3775                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3776
3777         MLX5_ASSERT(tbl);
3778         MLX5_ASSERT(tbl_data->jump.action);
3779         dev_flow->handle->rix_jump = tbl_data->idx;
3780         dev_flow->dv.jump = &tbl_data->jump;
3781         return 0;
3782 }
3783
3784 int
3785 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3786                          struct mlx5_cache_entry *entry, void *cb_ctx)
3787 {
3788         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3789         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3790         struct mlx5_flow_dv_port_id_action_resource *res =
3791                         container_of(entry, typeof(*res), entry);
3792
3793         return ref->port_id != res->port_id;
3794 }
3795
3796 struct mlx5_cache_entry *
3797 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3798                           struct mlx5_cache_entry *entry __rte_unused,
3799                           void *cb_ctx)
3800 {
3801         struct mlx5_dev_ctx_shared *sh = list->ctx;
3802         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3803         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3804         struct mlx5_flow_dv_port_id_action_resource *cache;
3805         uint32_t idx;
3806         int ret;
3807
3808         /* Register new port id action resource. */
3809         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3810         if (!cache) {
3811                 rte_flow_error_set(ctx->error, ENOMEM,
3812                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3813                                    "cannot allocate port_id action cache memory");
3814                 return NULL;
3815         }
3816         *cache = *ref;
3817         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3818                                                         ref->port_id,
3819                                                         &cache->action);
3820         if (ret) {
3821                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3822                 rte_flow_error_set(ctx->error, ENOMEM,
3823                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3824                                    "cannot create action");
3825                 return NULL;
3826         }
3827         cache->idx = idx;
3828         return &cache->entry;
3829 }
3830
3831 /**
3832  * Find existing table port ID resource or create and register a new one.
3833  *
3834  * @param[in, out] dev
3835  *   Pointer to rte_eth_dev structure.
3836  * @param[in, out] resource
3837  *   Pointer to port ID action resource.
3838  * @param[in, out] dev_flow
3839  *   Pointer to the dev_flow.
3840  * @param[out] error
3841  *   Pointer to error structure.
3842  *
3843  * @return
3844  *   0 on success, otherwise a negative errno value and rte_errno is set.
3845  */
3846 static int
3847 flow_dv_port_id_action_resource_register
3848                         (struct rte_eth_dev *dev,
3849                          struct mlx5_flow_dv_port_id_action_resource *resource,
3850                          struct mlx5_flow *dev_flow,
3851                          struct rte_flow_error *error)
3852 {
3853         struct mlx5_priv *priv = dev->data->dev_private;
3854         struct mlx5_cache_entry *entry;
3855         struct mlx5_flow_dv_port_id_action_resource *cache;
3856         struct mlx5_flow_cb_ctx ctx = {
3857                 .error = error,
3858                 .data = resource,
3859         };
3860
3861         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3862         if (!entry)
3863                 return -rte_errno;
3864         cache = container_of(entry, typeof(*cache), entry);
3865         dev_flow->dv.port_id_action = cache;
3866         dev_flow->handle->rix_port_id_action = cache->idx;
3867         return 0;
3868 }
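
/*
 * Illustration only, a minimal sketch of how mlx5_cache_register() drives
 * the two callbacks above (port id 3 is a hypothetical value):
 *
 * @code
 * struct mlx5_flow_dv_port_id_action_resource res = { .port_id = 3 };
 * struct mlx5_flow_cb_ctx ctx = { .error = error, .data = &res };
 * struct mlx5_cache_entry *entry;
 *
 * entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
 * // flow_dv_port_id_match_cb() is run against cached entries; on the
 * // first miss flow_dv_port_id_create_cb() allocates the resource and
 * // creates the destination port DR action.
 * @endcode
 */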
3869
3870 int
3871 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3872                          struct mlx5_cache_entry *entry, void *cb_ctx)
3873 {
3874         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3875         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3876         struct mlx5_flow_dv_push_vlan_action_resource *res =
3877                         container_of(entry, typeof(*res), entry);
3878
3879         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3880 }
3881
3882 struct mlx5_cache_entry *
3883 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3884                           struct mlx5_cache_entry *entry __rte_unused,
3885                           void *cb_ctx)
3886 {
3887         struct mlx5_dev_ctx_shared *sh = list->ctx;
3888         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3889         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3890         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3891         struct mlx5dv_dr_domain *domain;
3892         uint32_t idx;
3893         int ret;
3894
3895         /* Register new push_vlan action resource. */
3896         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3897         if (!cache) {
3898                 rte_flow_error_set(ctx->error, ENOMEM,
3899                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3900                                    "cannot allocate push_vlan action cache memory");
3901                 return NULL;
3902         }
3903         *cache = *ref;
3904         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3905                 domain = sh->fdb_domain;
3906         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3907                 domain = sh->rx_domain;
3908         else
3909                 domain = sh->tx_domain;
3910         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3911                                                         &cache->action);
3912         if (ret) {
3913                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3914                 rte_flow_error_set(ctx->error, ENOMEM,
3915                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3916                                    "cannot create push vlan action");
3917                 return NULL;
3918         }
3919         cache->idx = idx;
3920         return &cache->entry;
3921 }
3922
3923 /**
3924  * Find existing push vlan resource or create and register a new one.
3925  *
3926  * @param[in, out] dev
3927  *   Pointer to rte_eth_dev structure.
3928  * @param[in, out] resource
3929  *   Pointer to push VLAN action resource.
3930  * @param[in, out] dev_flow
3931  *   Pointer to the dev_flow.
3932  * @param[out] error
3933  *   Pointer to error structure.
3934  *
3935  * @return
3936  *   0 on success, otherwise a negative errno value and rte_errno is set.
3937  */
3938 static int
3939 flow_dv_push_vlan_action_resource_register
3940                        (struct rte_eth_dev *dev,
3941                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3942                         struct mlx5_flow *dev_flow,
3943                         struct rte_flow_error *error)
3944 {
3945         struct mlx5_priv *priv = dev->data->dev_private;
3946         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3947         struct mlx5_cache_entry *entry;
3948         struct mlx5_flow_cb_ctx ctx = {
3949                 .error = error,
3950                 .data = resource,
3951         };
3952
3953         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3954         if (!entry)
3955                 return -rte_errno;
3956         cache = container_of(entry, typeof(*cache), entry);
3957
3958         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3959         dev_flow->dv.push_vlan_res = cache;
3960         return 0;
3961 }
3962
3963 /**
3964  * Get the header length of a specific rte_flow_item_type.
3965  *
3966  * @param[in] item_type
3967  *   Tested rte_flow_item_type.
3968  *
3969  * @return
3970  *   Size of the item type's header in bytes, 0 if void or irrelevant.
3971  */
3972 static size_t
3973 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3974 {
3975         size_t retval;
3976
3977         switch (item_type) {
3978         case RTE_FLOW_ITEM_TYPE_ETH:
3979                 retval = sizeof(struct rte_ether_hdr);
3980                 break;
3981         case RTE_FLOW_ITEM_TYPE_VLAN:
3982                 retval = sizeof(struct rte_vlan_hdr);
3983                 break;
3984         case RTE_FLOW_ITEM_TYPE_IPV4:
3985                 retval = sizeof(struct rte_ipv4_hdr);
3986                 break;
3987         case RTE_FLOW_ITEM_TYPE_IPV6:
3988                 retval = sizeof(struct rte_ipv6_hdr);
3989                 break;
3990         case RTE_FLOW_ITEM_TYPE_UDP:
3991                 retval = sizeof(struct rte_udp_hdr);
3992                 break;
3993         case RTE_FLOW_ITEM_TYPE_TCP:
3994                 retval = sizeof(struct rte_tcp_hdr);
3995                 break;
3996         case RTE_FLOW_ITEM_TYPE_VXLAN:
3997         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3998                 retval = sizeof(struct rte_vxlan_hdr);
3999                 break;
4000         case RTE_FLOW_ITEM_TYPE_GRE:
4001         case RTE_FLOW_ITEM_TYPE_NVGRE:
4002                 retval = sizeof(struct rte_gre_hdr);
4003                 break;
4004         case RTE_FLOW_ITEM_TYPE_MPLS:
4005                 retval = sizeof(struct rte_mpls_hdr);
4006                 break;
4007         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4008         default:
4009                 retval = 0;
4010                 break;
4011         }
4012         return retval;
4013 }
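
/*
 * Illustration only: for a typical VXLAN encapsulation pattern the helper
 * above yields 14 (ETH) + 20 (IPV4) + 8 (UDP) + 8 (VXLAN) = 50 bytes,
 * which is the raw header length flow_dv_convert_encap_data() accumulates.
 */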
4014
4015 #define MLX5_ENCAP_IPV4_VERSION         0x40
4016 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4017 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4018 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4019 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4020 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4021 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4022
4023 /**
4024  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
4025  *
4026  * @param[in] items
4027  *   Pointer to rte_flow_item objects list.
4028  * @param[out] buf
4029  *   Pointer to the output buffer.
4030  * @param[out] size
4031  *   Pointer to the output buffer size.
4032  * @param[out] error
4033  *   Pointer to the error structure.
4034  *
4035  * @return
4036  *   0 on success, a negative errno value otherwise and rte_errno is set.
4037  */
4038 static int
4039 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4040                            size_t *size, struct rte_flow_error *error)
4041 {
4042         struct rte_ether_hdr *eth = NULL;
4043         struct rte_vlan_hdr *vlan = NULL;
4044         struct rte_ipv4_hdr *ipv4 = NULL;
4045         struct rte_ipv6_hdr *ipv6 = NULL;
4046         struct rte_udp_hdr *udp = NULL;
4047         struct rte_vxlan_hdr *vxlan = NULL;
4048         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4049         struct rte_gre_hdr *gre = NULL;
4050         size_t len;
4051         size_t temp_size = 0;
4052
4053         if (!items)
4054                 return rte_flow_error_set(error, EINVAL,
4055                                           RTE_FLOW_ERROR_TYPE_ACTION,
4056                                           NULL, "invalid empty data");
4057         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4058                 len = flow_dv_get_item_hdr_len(items->type);
4059                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4060                         return rte_flow_error_set(error, EINVAL,
4061                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4062                                                   (void *)items->type,
4063                                                   "items total size is too big"
4064                                                   " for encap action");
4065                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4066                 switch (items->type) {
4067                 case RTE_FLOW_ITEM_TYPE_ETH:
4068                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4069                         break;
4070                 case RTE_FLOW_ITEM_TYPE_VLAN:
4071                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4072                         if (!eth)
4073                                 return rte_flow_error_set(error, EINVAL,
4074                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4075                                                 (void *)items->type,
4076                                                 "eth header not found");
4077                         if (!eth->ether_type)
4078                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4079                         break;
4080                 case RTE_FLOW_ITEM_TYPE_IPV4:
4081                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4082                         if (!vlan && !eth)
4083                                 return rte_flow_error_set(error, EINVAL,
4084                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4085                                                 (void *)items->type,
4086                                                 "neither eth nor vlan"
4087                                                 " header found");
4088                         if (vlan && !vlan->eth_proto)
4089                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4090                         else if (eth && !eth->ether_type)
4091                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4092                         if (!ipv4->version_ihl)
4093                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4094                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4095                         if (!ipv4->time_to_live)
4096                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4097                         break;
4098                 case RTE_FLOW_ITEM_TYPE_IPV6:
4099                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4100                         if (!vlan && !eth)
4101                                 return rte_flow_error_set(error, EINVAL,
4102                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4103                                                 (void *)items->type,
4104                                                 "neither eth nor vlan"
4105                                                 " header found");
4106                         if (vlan && !vlan->eth_proto)
4107                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4108                         else if (eth && !eth->ether_type)
4109                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4110                         if (!ipv6->vtc_flow)
4111                                 ipv6->vtc_flow =
4112                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4113                         if (!ipv6->hop_limits)
4114                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4115                         break;
4116                 case RTE_FLOW_ITEM_TYPE_UDP:
4117                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4118                         if (!ipv4 && !ipv6)
4119                                 return rte_flow_error_set(error, EINVAL,
4120                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4121                                                 (void *)items->type,
4122                                                 "ip header not found");
4123                         if (ipv4 && !ipv4->next_proto_id)
4124                                 ipv4->next_proto_id = IPPROTO_UDP;
4125                         else if (ipv6 && !ipv6->proto)
4126                                 ipv6->proto = IPPROTO_UDP;
4127                         break;
4128                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4129                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4130                         if (!udp)
4131                                 return rte_flow_error_set(error, EINVAL,
4132                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4133                                                 (void *)items->type,
4134                                                 "udp header not found");
4135                         if (!udp->dst_port)
4136                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4137                         if (!vxlan->vx_flags)
4138                                 vxlan->vx_flags =
4139                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4140                         break;
4141                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4142                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4143                         if (!udp)
4144                                 return rte_flow_error_set(error, EINVAL,
4145                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4146                                                 (void *)items->type,
4147                                                 "udp header not found");
4148                         if (!vxlan_gpe->proto)
4149                                 return rte_flow_error_set(error, EINVAL,
4150                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4151                                                 (void *)items->type,
4152                                                 "next protocol not found");
4153                         if (!udp->dst_port)
4154                                 udp->dst_port =
4155                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4156                         if (!vxlan_gpe->vx_flags)
4157                                 vxlan_gpe->vx_flags =
4158                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4159                         break;
4160                 case RTE_FLOW_ITEM_TYPE_GRE:
4161                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4162                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4163                         if (!gre->proto)
4164                                 return rte_flow_error_set(error, EINVAL,
4165                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4166                                                 (void *)items->type,
4167                                                 "next protocol not found");
4168                         if (!ipv4 && !ipv6)
4169                                 return rte_flow_error_set(error, EINVAL,
4170                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4171                                                 (void *)items->type,
4172                                                 "ip header not found");
4173                         if (ipv4 && !ipv4->next_proto_id)
4174                                 ipv4->next_proto_id = IPPROTO_GRE;
4175                         else if (ipv6 && !ipv6->proto)
4176                                 ipv6->proto = IPPROTO_GRE;
4177                         break;
4178                 case RTE_FLOW_ITEM_TYPE_VOID:
4179                         break;
4180                 default:
4181                         return rte_flow_error_set(error, EINVAL,
4182                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4183                                                   (void *)items->type,
4184                                                   "unsupported item type");
4185                         break;
4186                 }
4187                 temp_size += len;
4188         }
4189         *size = temp_size;
4190         return 0;
4191 }
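
/*
 * Illustration only (the *_spec variables are hypothetical): a VXLAN encap
 * definition such as
 *
 * @code
 * struct rte_flow_item items[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *	{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode
 *
 * is flattened into a 50-byte raw header; zeroed fields such as the
 * EtherType, IPv4 next_proto_id and the VXLAN UDP destination port are
 * filled in with the defaults above.
 */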
4192
4193 static int
4194 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4195 {
4196         struct rte_ether_hdr *eth = NULL;
4197         struct rte_vlan_hdr *vlan = NULL;
4198         struct rte_ipv6_hdr *ipv6 = NULL;
4199         struct rte_udp_hdr *udp = NULL;
4200         char *next_hdr;
4201         uint16_t proto;
4202
4203         eth = (struct rte_ether_hdr *)data;
4204         next_hdr = (char *)(eth + 1);
4205         proto = RTE_BE16(eth->ether_type);
4206
4207         /* VLAN skipping */
4208         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4209                 vlan = (struct rte_vlan_hdr *)next_hdr;
4210                 proto = RTE_BE16(vlan->eth_proto);
4211                 next_hdr += sizeof(struct rte_vlan_hdr);
4212         }
4213
4214         /* HW calculates the IPv4 checksum; no need to proceed. */
4215         if (proto == RTE_ETHER_TYPE_IPV4)
4216                 return 0;
4217
4218         /* Non-IPv4/IPv6 header; not supported. */
4219         if (proto != RTE_ETHER_TYPE_IPV6) {
4220                 return rte_flow_error_set(error, ENOTSUP,
4221                                           RTE_FLOW_ERROR_TYPE_ACTION,
4222                                           NULL, "Cannot offload non IPv4/IPv6");
4223         }
4224
4225         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4226
4227         /* Ignore non-UDP protocols. */
4228         if (ipv6->proto != IPPROTO_UDP)
4229                 return 0;
4230
4231         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4232         udp->dgram_cksum = 0;
4233
4234         return 0;
4235 }
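
/*
 * Illustration only: for an IPv6 tunnel header built as
 * ETH -> VLAN -> IPV6 -> UDP the walk above skips any number of VLAN
 * tags and clears dgram_cksum, relying on the zero-UDP-checksum
 * convention for tunnels, since the hardware recalculates only the IPv4
 * checksum.
 */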
4236
4237 /**
4238  * Convert L2 encap action to DV specification.
4239  *
4240  * @param[in] dev
4241  *   Pointer to rte_eth_dev structure.
4242  * @param[in] action
4243  *   Pointer to action structure.
4244  * @param[in, out] dev_flow
4245  *   Pointer to the mlx5_flow.
4246  * @param[in] transfer
4247  *   Mark if the flow is E-Switch flow.
4248  * @param[out] error
4249  *   Pointer to the error structure.
4250  *
4251  * @return
4252  *   0 on success, a negative errno value otherwise and rte_errno is set.
4253  */
4254 static int
4255 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4256                                const struct rte_flow_action *action,
4257                                struct mlx5_flow *dev_flow,
4258                                uint8_t transfer,
4259                                struct rte_flow_error *error)
4260 {
4261         const struct rte_flow_item *encap_data;
4262         const struct rte_flow_action_raw_encap *raw_encap_data;
4263         struct mlx5_flow_dv_encap_decap_resource res = {
4264                 .reformat_type =
4265                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4266                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4267                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4268         };
4269
4270         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4271                 raw_encap_data =
4272                         (const struct rte_flow_action_raw_encap *)action->conf;
4273                 res.size = raw_encap_data->size;
4274                 memcpy(res.buf, raw_encap_data->data, res.size);
4275         } else {
4276                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4277                         encap_data =
4278                                 ((const struct rte_flow_action_vxlan_encap *)
4279                                                 action->conf)->definition;
4280                 else
4281                         encap_data =
4282                                 ((const struct rte_flow_action_nvgre_encap *)
4283                                                 action->conf)->definition;
4284                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4285                                                &res.size, error))
4286                         return -rte_errno;
4287         }
4288         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4289                 return -rte_errno;
4290         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4291                 return rte_flow_error_set(error, EINVAL,
4292                                           RTE_FLOW_ERROR_TYPE_ACTION,
4293                                           NULL, "can't create L2 encap action");
4294         return 0;
4295 }
4296
4297 /**
4298  * Convert L2 decap action to DV specification.
4299  *
4300  * @param[in] dev
4301  *   Pointer to rte_eth_dev structure.
4302  * @param[in, out] dev_flow
4303  *   Pointer to the mlx5_flow.
4304  * @param[in] transfer
4305  *   Mark if the flow is E-Switch flow.
4306  * @param[out] error
4307  *   Pointer to the error structure.
4308  *
4309  * @return
4310  *   0 on success, a negative errno value otherwise and rte_errno is set.
4311  */
4312 static int
4313 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4314                                struct mlx5_flow *dev_flow,
4315                                uint8_t transfer,
4316                                struct rte_flow_error *error)
4317 {
4318         struct mlx5_flow_dv_encap_decap_resource res = {
4319                 .size = 0,
4320                 .reformat_type =
4321                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4322                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4323                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4324         };
4325
4326         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4327                 return rte_flow_error_set(error, EINVAL,
4328                                           RTE_FLOW_ERROR_TYPE_ACTION,
4329                                           NULL, "can't create L2 decap action");
4330         return 0;
4331 }
4332
4333 /**
4334  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4335  *
4336  * @param[in] dev
4337  *   Pointer to rte_eth_dev structure.
4338  * @param[in] action
4339  *   Pointer to action structure.
4340  * @param[in, out] dev_flow
4341  *   Pointer to the mlx5_flow.
4342  * @param[in] attr
4343  *   Pointer to the flow attributes.
4344  * @param[out] error
4345  *   Pointer to the error structure.
4346  *
4347  * @return
4348  *   0 on success, a negative errno value otherwise and rte_errno is set.
4349  */
4350 static int
4351 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4352                                 const struct rte_flow_action *action,
4353                                 struct mlx5_flow *dev_flow,
4354                                 const struct rte_flow_attr *attr,
4355                                 struct rte_flow_error *error)
4356 {
4357         const struct rte_flow_action_raw_encap *encap_data;
4358         struct mlx5_flow_dv_encap_decap_resource res;
4359
4360         memset(&res, 0, sizeof(res));
4361         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4362         res.size = encap_data->size;
4363         memcpy(res.buf, encap_data->data, res.size);
4364         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4365                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4366                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4367         if (attr->transfer)
4368                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4369         else
4370                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4371                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4372         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4373                 return rte_flow_error_set(error, EINVAL,
4374                                           RTE_FLOW_ERROR_TYPE_ACTION,
4375                                           NULL, "can't create encap action");
4376         return 0;
4377 }
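
/*
 * Illustration only: buffers shorter than MLX5_ENCAPSULATION_DECISION_SIZE
 * are assumed to carry only an L2 header, so the action is programmed as
 * an L3-tunnel-to-L2 reformat (decap direction); anything longer is taken
 * as a full tunnel stack and becomes an L2-to-L3-tunnel encap.
 */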
4378
4379 /**
4380  * Create action push VLAN.
4381  *
4382  * @param[in] dev
4383  *   Pointer to rte_eth_dev structure.
4384  * @param[in] attr
4385  *   Pointer to the flow attributes.
4386  * @param[in] vlan
4387  *   Pointer to the vlan to push to the Ethernet header.
4388  * @param[in, out] dev_flow
4389  *   Pointer to the mlx5_flow.
4390  * @param[out] error
4391  *   Pointer to the error structure.
4392  *
4393  * @return
4394  *   0 on success, a negative errno value otherwise and rte_errno is set.
4395  */
4396 static int
4397 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4398                                 const struct rte_flow_attr *attr,
4399                                 const struct rte_vlan_hdr *vlan,
4400                                 struct mlx5_flow *dev_flow,
4401                                 struct rte_flow_error *error)
4402 {
4403         struct mlx5_flow_dv_push_vlan_action_resource res;
4404
4405         memset(&res, 0, sizeof(res));
4406         res.vlan_tag =
4407                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4408                                  vlan->vlan_tci);
4409         if (attr->transfer)
4410                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4411         else
4412                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4413                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4414         return flow_dv_push_vlan_action_resource_register
4415                                             (dev, &res, dev_flow, error);
4416 }
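
/*
 * Illustration only (hypothetical values, CPU byte order as in the
 * computation above): TPID 0x8100 with PCP 1 and VID 5 gives
 *
 * @code
 * vlan.eth_proto = 0x8100;
 * vlan.vlan_tci = (1 << 13) | 5;	// PCP 1, DEI 0, VID 5
 * res.vlan_tag = rte_cpu_to_be_32(((uint32_t)vlan.eth_proto << 16) |
 *				    vlan.vlan_tci);	// 0x81002005 big-endian
 * @endcode
 */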
4417
4418 /**
4419  * Validate the modify-header actions.
4420  *
4421  * @param[in] action_flags
4422  *   Holds the actions detected until now.
4423  * @param[in] action
4424  *   Pointer to the modify action.
4425  * @param[out] error
4426  *   Pointer to error structure.
4427  *
4428  * @return
4429  *   0 on success, a negative errno value otherwise and rte_errno is set.
4430  */
4431 static int
4432 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4433                                    const struct rte_flow_action *action,
4434                                    struct rte_flow_error *error)
4435 {
4436         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4437                 return rte_flow_error_set(error, EINVAL,
4438                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4439                                           NULL, "action configuration not set");
4440         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4441                 return rte_flow_error_set(error, EINVAL,
4442                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4443                                           "can't have encap action before"
4444                                           " modify action");
4445         return 0;
4446 }
4447
4448 /**
4449  * Validate the modify-header MAC address actions.
4450  *
4451  * @param[in] action_flags
4452  *   Holds the actions detected until now.
4453  * @param[in] action
4454  *   Pointer to the modify action.
4455  * @param[in] item_flags
4456  *   Holds the items detected.
4457  * @param[out] error
4458  *   Pointer to error structure.
4459  *
4460  * @return
4461  *   0 on success, a negative errno value otherwise and rte_errno is set.
4462  */
4463 static int
4464 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4465                                    const struct rte_flow_action *action,
4466                                    const uint64_t item_flags,
4467                                    struct rte_flow_error *error)
4468 {
4469         int ret = 0;
4470
4471         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4472         if (!ret) {
4473                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4474                         return rte_flow_error_set(error, EINVAL,
4475                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4476                                                   NULL,
4477                                                   "no L2 item in pattern");
4478         }
4479         return ret;
4480 }
4481
4482 /**
4483  * Validate the modify-header IPv4 address actions.
4484  *
4485  * @param[in] action_flags
4486  *   Holds the actions detected until now.
4487  * @param[in] action
4488  *   Pointer to the modify action.
4489  * @param[in] item_flags
4490  *   Holds the items detected.
4491  * @param[out] error
4492  *   Pointer to error structure.
4493  *
4494  * @return
4495  *   0 on success, a negative errno value otherwise and rte_errno is set.
4496  */
4497 static int
4498 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4499                                     const struct rte_flow_action *action,
4500                                     const uint64_t item_flags,
4501                                     struct rte_flow_error *error)
4502 {
4503         int ret = 0;
4504         uint64_t layer;
4505
4506         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4507         if (!ret) {
4508                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4509                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4510                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4511                 if (!(item_flags & layer))
4512                         return rte_flow_error_set(error, EINVAL,
4513                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4514                                                   NULL,
4515                                                   "no ipv4 item in pattern");
4516         }
4517         return ret;
4518 }
4519
4520 /**
4521  * Validate the modify-header IPv6 address actions.
4522  *
4523  * @param[in] action_flags
4524  *   Holds the actions detected until now.
4525  * @param[in] action
4526  *   Pointer to the modify action.
4527  * @param[in] item_flags
4528  *   Holds the items detected.
4529  * @param[out] error
4530  *   Pointer to error structure.
4531  *
4532  * @return
4533  *   0 on success, a negative errno value otherwise and rte_errno is set.
4534  */
4535 static int
4536 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4537                                     const struct rte_flow_action *action,
4538                                     const uint64_t item_flags,
4539                                     struct rte_flow_error *error)
4540 {
4541         int ret = 0;
4542         uint64_t layer;
4543
4544         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4545         if (!ret) {
4546                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4547                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4548                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4549                 if (!(item_flags & layer))
4550                         return rte_flow_error_set(error, EINVAL,
4551                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4552                                                   NULL,
4553                                                   "no ipv6 item in pattern");
4554         }
4555         return ret;
4556 }
4557
4558 /**
4559  * Validate the modify-header TP actions.
4560  *
4561  * @param[in] action_flags
4562  *   Holds the actions detected until now.
4563  * @param[in] action
4564  *   Pointer to the modify action.
4565  * @param[in] item_flags
4566  *   Holds the items detected.
4567  * @param[out] error
4568  *   Pointer to error structure.
4569  *
4570  * @return
4571  *   0 on success, a negative errno value otherwise and rte_errno is set.
4572  */
4573 static int
4574 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4575                                   const struct rte_flow_action *action,
4576                                   const uint64_t item_flags,
4577                                   struct rte_flow_error *error)
4578 {
4579         int ret = 0;
4580         uint64_t layer;
4581
4582         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4583         if (!ret) {
4584                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4585                                  MLX5_FLOW_LAYER_INNER_L4 :
4586                                  MLX5_FLOW_LAYER_OUTER_L4;
4587                 if (!(item_flags & layer))
4588                         return rte_flow_error_set(error, EINVAL,
4589                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4590                                                   NULL, "no transport layer "
4591                                                   "in pattern");
4592         }
4593         return ret;
4594 }
4595
4596 /**
4597  * Validate the modify-header actions of increment/decrement
4598  * TCP Sequence-number.
4599  *
4600  * @param[in] action_flags
4601  *   Holds the actions detected until now.
4602  * @param[in] action
4603  *   Pointer to the modify action.
4604  * @param[in] item_flags
4605  *   Holds the items detected.
4606  * @param[out] error
4607  *   Pointer to error structure.
4608  *
4609  * @return
4610  *   0 on success, a negative errno value otherwise and rte_errno is set.
4611  */
4612 static int
4613 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4614                                        const struct rte_flow_action *action,
4615                                        const uint64_t item_flags,
4616                                        struct rte_flow_error *error)
4617 {
4618         int ret = 0;
4619         uint64_t layer;
4620
4621         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4622         if (!ret) {
4623                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4624                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4625                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4626                 if (!(item_flags & layer))
4627                         return rte_flow_error_set(error, EINVAL,
4628                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4629                                                   NULL, "no TCP item in"
4630                                                   " pattern");
4631                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4632                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4633                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4634                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4635                         return rte_flow_error_set(error, EINVAL,
4636                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4637                                                   NULL,
4638                                                   "cannot decrease and increase"
4639                                                   " TCP sequence number"
4640                                                   " at the same time");
4641         }
4642         return ret;
4643 }
4644
4645 /**
4646  * Validate the modify-header actions of increment/decrement
4647  * TCP Acknowledgment number.
4648  *
4649  * @param[in] action_flags
4650  *   Holds the actions detected until now.
4651  * @param[in] action
4652  *   Pointer to the modify action.
4653  * @param[in] item_flags
4654  *   Holds the items detected.
4655  * @param[out] error
4656  *   Pointer to error structure.
4657  *
4658  * @return
4659  *   0 on success, a negative errno value otherwise and rte_errno is set.
4660  */
4661 static int
4662 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4663                                        const struct rte_flow_action *action,
4664                                        const uint64_t item_flags,
4665                                        struct rte_flow_error *error)
4666 {
4667         int ret = 0;
4668         uint64_t layer;
4669
4670         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4671         if (!ret) {
4672                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4673                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4674                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4675                 if (!(item_flags & layer))
4676                         return rte_flow_error_set(error, EINVAL,
4677                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4678                                                   NULL, "no TCP item in"
4679                                                   " pattern");
4680                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4681                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4682                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4683                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4684                         return rte_flow_error_set(error, EINVAL,
4685                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4686                                                   NULL,
4687                                                   "cannot decrease and increase"
4688                                                   " TCP acknowledgment number"
4689                                                   " at the same time");
4690         }
4691         return ret;
4692 }
4693
4694 /**
4695  * Validate the modify-header TTL actions.
4696  *
4697  * @param[in] action_flags
4698  *   Holds the actions detected until now.
4699  * @param[in] action
4700  *   Pointer to the modify action.
4701  * @param[in] item_flags
4702  *   Holds the items detected.
4703  * @param[out] error
4704  *   Pointer to error structure.
4705  *
4706  * @return
4707  *   0 on success, a negative errno value otherwise and rte_errno is set.
4708  */
4709 static int
4710 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4711                                    const struct rte_flow_action *action,
4712                                    const uint64_t item_flags,
4713                                    struct rte_flow_error *error)
4714 {
4715         int ret = 0;
4716         uint64_t layer;
4717
4718         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4719         if (!ret) {
4720                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4721                                  MLX5_FLOW_LAYER_INNER_L3 :
4722                                  MLX5_FLOW_LAYER_OUTER_L3;
4723                 if (!(item_flags & layer))
4724                         return rte_flow_error_set(error, EINVAL,
4725                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4726                                                   NULL,
4727                                                   "no IP protocol in pattern");
4728         }
4729         return ret;
4730 }
4731
4732 /**
4733  * Validate the generic modify field actions.
 *
4734  * @param[in] dev
4735  *   Pointer to the rte_eth_dev structure.
4736  * @param[in] action_flags
4737  *   Holds the actions detected until now.
4738  * @param[in] action
4739  *   Pointer to the modify action.
4740  * @param[in] attr
4741  *   Pointer to the flow attributes.
4742  * @param[out] error
4743  *   Pointer to error structure.
4744  *
4745  * @return
4746  *   Number of header fields to modify (0 or more) on success,
4747  *   a negative errno value otherwise and rte_errno is set.
4748  */
4749 static int
4750 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4751                                    const uint64_t action_flags,
4752                                    const struct rte_flow_action *action,
4753                                    const struct rte_flow_attr *attr,
4754                                    struct rte_flow_error *error)
4755 {
4756         int ret = 0;
4757         struct mlx5_priv *priv = dev->data->dev_private;
4758         struct mlx5_dev_config *config = &priv->config;
4759         const struct rte_flow_action_modify_field *action_modify_field =
4760                 action->conf;
4761         uint32_t dst_width = mlx5_flow_item_field_width(config,
4762                                 action_modify_field->dst.field);
4763         uint32_t src_width = mlx5_flow_item_field_width(config,
4764                                 action_modify_field->src.field);
4765
4766         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4767         if (ret)
4768                 return ret;
4769
4770         if (action_modify_field->width == 0)
4771                 return rte_flow_error_set(error, EINVAL,
4772                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4773                                 "no bits are requested to be modified");
4774         else if (action_modify_field->width > dst_width ||
4775                  action_modify_field->width > src_width)
4776                 return rte_flow_error_set(error, EINVAL,
4777                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4778                                 "cannot modify more bits than"
4779                                 " the width of a field");
4780         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4781             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4782                 if ((action_modify_field->dst.offset +
4783                      action_modify_field->width > dst_width) ||
4784                     (action_modify_field->dst.offset % 32))
4785                         return rte_flow_error_set(error, EINVAL,
4786                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4787                                         "destination offset is too big"
4788                                         " or not aligned to 4 bytes");
4789                 if (action_modify_field->dst.level &&
4790                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4791                         return rte_flow_error_set(error, ENOTSUP,
4792                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4793                                         "inner header fields modification"
4794                                         " is not supported");
4795         }
4796         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4797             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4798                 if (!attr->transfer && !attr->group)
4799                         return rte_flow_error_set(error, ENOTSUP,
4800                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4801                                         "modify field action is not"
4802                                         " supported for group 0");
4803                 if ((action_modify_field->src.offset +
4804                      action_modify_field->width > src_width) ||
4805                     (action_modify_field->src.offset % 32))
4806                         return rte_flow_error_set(error, EINVAL,
4807                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4808                                         "source offset is too big"
4809                                         " or not aligned to 4 bytes");
4810                 if (action_modify_field->src.level &&
4811                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4812                         return rte_flow_error_set(error, ENOTSUP,
4813                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4814                                         "inner header fields modification"
4815                                         " is not supported");
4816         }
4817         if ((action_modify_field->dst.field ==
4818              action_modify_field->src.field) &&
4819             (action_modify_field->dst.level ==
4820              action_modify_field->src.level))
4821                 return rte_flow_error_set(error, EINVAL,
4822                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4823                                 "source and destination fields"
4824                                 " cannot be the same");
4825         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4826             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4827                 return rte_flow_error_set(error, EINVAL,
4828                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4829                                 "immediate value or a pointer to it"
4830                                 " cannot be used as a destination");
4831         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4832             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4833                 return rte_flow_error_set(error, ENOTSUP,
4834                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4835                                 "modifications of an arbitrary"
4836                                 " place in a packet are not supported");
4837         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4838             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4839                 return rte_flow_error_set(error, ENOTSUP,
4840                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4841                                 "modifications of the 802.1Q Tag"
4842                                 " Identifier are not supported");
4843         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4844             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4845                 return rte_flow_error_set(error, ENOTSUP,
4846                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4847                                 "modifications of the VXLAN Network"
4848                                 " Identifier are not supported");
4849         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4850             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4851                 return rte_flow_error_set(error, ENOTSUP,
4852                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4853                                 "modifications of the GENEVE Network"
4854                                 " Identifier are not supported");
4855         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4856             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4857             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4858             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4859                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4860                     !mlx5_flow_ext_mreg_supported(dev))
4861                         return rte_flow_error_set(error, ENOTSUP,
4862                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4863                                         "cannot modify mark or metadata without"
4864                                         " extended metadata register support");
4865         }
4866         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4867                 return rte_flow_error_set(error, ENOTSUP,
4868                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4869                                 "add and sub operations"
4870                                 " are not supported");
4871         return (action_modify_field->width / 32) +
4872                !!(action_modify_field->width % 32);
4873 }
4874
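/*
 * Illustrative note: the validator above returns the number of 32-bit
 * modify-header actions needed to cover the requested bit width, i.e.
 * width / 32 rounded up. A minimal sketch of the same arithmetic, assuming
 * a hypothetical helper name:
 *
 *   static inline unsigned int
 *   modify_field_actions_needed(unsigned int width)
 *   {
 *           return (width / 32) + !!(width % 32);
 *   }
 *
 * For example, width == 48 gives 1 + 1 == 2 actions.
 */
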
4875 /**
4876  * Validate jump action.
4877  *
      * @param[in] dev
      *   Pointer to the rte_eth_dev structure.
      * @param[in] tunnel
      *   Pointer to the tunnel offload context, NULL for non-tunnel flows.
4878  * @param[in] action
4879  *   Pointer to the jump action.
4880  * @param[in] action_flags
4881  *   Holds the actions detected until now.
4882  * @param[in] attributes
4883  *   Pointer to flow attributes
4884  * @param[in] external
4885  *   True if the action belongs to a flow rule created externally to the PMD.
4886  * @param[out] error
4887  *   Pointer to error structure.
4888  *
4889  * @return
4890  *   0 on success, a negative errno value otherwise and rte_errno is set.
4891  */
4892 static int
4893 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4894                              const struct mlx5_flow_tunnel *tunnel,
4895                              const struct rte_flow_action *action,
4896                              uint64_t action_flags,
4897                              const struct rte_flow_attr *attributes,
4898                              bool external, struct rte_flow_error *error)
4899 {
4900         uint32_t target_group, table;
4901         int ret = 0;
4902         struct flow_grp_info grp_info = {
4903                 .external = !!external,
4904                 .transfer = !!attributes->transfer,
4905                 .fdb_def_rule = 1,
4906                 .std_tbl_fix = 0
4907         };
4908         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4909                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4910                 return rte_flow_error_set(error, EINVAL,
4911                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4912                                           "can't have 2 fate actions in"
4913                                           " same flow");
4914         if (!action->conf)
4915                 return rte_flow_error_set(error, EINVAL,
4916                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4917                                           NULL, "action configuration not set");
4918         target_group =
4919                 ((const struct rte_flow_action_jump *)action->conf)->group;
4920         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4921                                        &grp_info, error);
4922         if (ret)
4923                 return ret;
4924         if (attributes->group == target_group &&
4925             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4926                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4927                 return rte_flow_error_set(error, EINVAL,
4928                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4929                                           "target group must be other than"
4930                                           " the current flow group");
4931         return 0;
4932 }
4933
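/*
 * Minimal application-side sketch of a jump action that satisfies the
 * validation above, using the public rte_flow API (variable names and the
 * group numbers are placeholders): the action must carry a configuration
 * and target a group different from the rule's own group.
 *
 *   struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *   struct rte_flow_action_jump jump = { .group = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
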
4934 /**
4935  * Validate the port_id action.
4936  *
4937  * @param[in] dev
4938  *   Pointer to rte_eth_dev structure.
4939  * @param[in] action_flags
4940  *   Bit-fields that holds the actions detected until now.
4941  * @param[in] action
4942  *   Port_id RTE action structure.
4943  * @param[in] attr
4944  *   Attributes of flow that includes this action.
4945  * @param[out] error
4946  *   Pointer to error structure.
4947  *
4948  * @return
4949  *   0 on success, a negative errno value otherwise and rte_errno is set.
4950  */
4951 static int
4952 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4953                                 uint64_t action_flags,
4954                                 const struct rte_flow_action *action,
4955                                 const struct rte_flow_attr *attr,
4956                                 struct rte_flow_error *error)
4957 {
4958         const struct rte_flow_action_port_id *port_id;
4959         struct mlx5_priv *act_priv;
4960         struct mlx5_priv *dev_priv;
4961         uint16_t port;
4962
4963         if (!attr->transfer)
4964                 return rte_flow_error_set(error, ENOTSUP,
4965                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4966                                           NULL,
4967                                           "port id action is valid in transfer"
4968                                           " mode only");
4969         if (!action || !action->conf)
4970                 return rte_flow_error_set(error, ENOTSUP,
4971                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4972                                           NULL,
4973                                           "port id action parameters must be"
4974                                           " specified");
4975         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4976                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4977                 return rte_flow_error_set(error, EINVAL,
4978                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4979                                           "can have only one fate action in"
4980                                           " a flow");
4981         dev_priv = mlx5_dev_to_eswitch_info(dev);
4982         if (!dev_priv)
4983                 return rte_flow_error_set(error, rte_errno,
4984                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4985                                           NULL,
4986                                           "failed to obtain E-Switch info");
4987         port_id = action->conf;
4988         port = port_id->original ? dev->data->port_id : port_id->id;
4989         act_priv = mlx5_port_to_eswitch_info(port, false);
4990         if (!act_priv)
4991                 return rte_flow_error_set
4992                                 (error, rte_errno,
4993                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4994                                  "failed to obtain E-Switch port id for port");
4995         if (act_priv->domain_id != dev_priv->domain_id)
4996                 return rte_flow_error_set
4997                                 (error, EINVAL,
4998                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4999                                  "port does not belong to"
5000                                  " E-Switch being configured");
5001         return 0;
5002 }
5003
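/*
 * Minimal application-side sketch, assuming a transfer rule and a
 * placeholder destination port id of 1; per the checks above the target
 * port must share the E-Switch domain with the configuring device.
 *
 *   struct rte_flow_attr attr = { .transfer = 1 };
 *   struct rte_flow_action_port_id pid = { .id = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
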
5004 /**
5005  * Get the maximum number of modify header actions.
5006  *
5007  * @param dev
5008  *   Pointer to rte_eth_dev structure.
5009  * @param flags
5010  *   Flags bits to check if root level.
5011  *
5012  * @return
5013  *   Max number of modify header actions device can support.
5014  */
5015 static inline unsigned int
5016 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5017                               uint64_t flags)
5018 {
5019         /*
5020          * There's no way to directly query the max capacity from FW.
5021          * The maximal value on root table should be assumed to be supported.
5022          */
5023         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
5024                 return MLX5_MAX_MODIFY_NUM;
5025         else
5026                 return MLX5_ROOT_TBL_MODIFY_NUM;
5027 }
5028
5029 /**
5030  * Validate the meter action.
5031  *
5032  * @param[in] dev
5033  *   Pointer to rte_eth_dev structure.
5034  * @param[in] action_flags
5035  *   Bit-fields that holds the actions detected until now.
5036  * @param[in] action
5037  *   Pointer to the meter action.
5038  * @param[in] attr
5039  *   Attributes of flow that includes this action.
5040  * @param[in] port_id_item
5041  *   Pointer to item indicating port id.
5042  * @param[out] error
5043  *   Pointer to error structure.
5044  *
5045  * @return
5046  *   0 on success, a negative errno value otherwise and rte_errno is set.
5047  */
5048 static int
5049 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5050                                 uint64_t action_flags,
5051                                 const struct rte_flow_action *action,
5052                                 const struct rte_flow_attr *attr,
5053                                 const struct rte_flow_item *port_id_item,
5054                                 bool *def_policy,
5055                                 struct rte_flow_error *error)
5056 {
5057         struct mlx5_priv *priv = dev->data->dev_private;
5058         const struct rte_flow_action_meter *am = action->conf;
5059         struct mlx5_flow_meter_info *fm;
5060         struct mlx5_flow_meter_policy *mtr_policy;
5061         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5062
5063         if (!am)
5064                 return rte_flow_error_set(error, EINVAL,
5065                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5066                                           "meter action conf is NULL");
5067
5068         if (action_flags & MLX5_FLOW_ACTION_METER)
5069                 return rte_flow_error_set(error, ENOTSUP,
5070                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5071                                           "meter chaining not supported");
5072         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5073                 return rte_flow_error_set(error, ENOTSUP,
5074                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5075                                           "meter with jump not supported");
5076         if (!priv->mtr_en)
5077                 return rte_flow_error_set(error, ENOTSUP,
5078                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5079                                           NULL,
5080                                           "meter action not supported");
5081         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5082         if (!fm)
5083                 return rte_flow_error_set(error, EINVAL,
5084                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5085                                           "Meter not found");
5086         /* ASO meter can always be shared by different domains. */
5087         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5088             !(fm->transfer == attr->transfer ||
5089               (!fm->ingress && !attr->ingress && attr->egress) ||
5090               (!fm->egress && !attr->egress && attr->ingress)))
5091                 return rte_flow_error_set(error, EINVAL,
5092                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5093                         "Flow attributes domain is either invalid "
5094                         "or has a domain conflict with current "
5095                         "meter attributes");
5096         if (fm->def_policy) {
5097                 if (!((attr->transfer &&
5098                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5099                         (attr->egress &&
5100                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5101                         (attr->ingress &&
5102                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5103                         return rte_flow_error_set(error, EINVAL,
5104                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5105                                           "Flow attributes domain "
5106                                           "has a conflict with current "
5107                                           "meter domain attributes");
5108                 *def_policy = true;
5109         } else {
5110                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5111                                                 fm->policy_id, NULL);
5112                 if (!mtr_policy)
5113                         return rte_flow_error_set(error, EINVAL,
5114                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5115                                           "Invalid policy id for meter");
5116                 if (!((attr->transfer && mtr_policy->transfer) ||
5117                         (attr->egress && mtr_policy->egress) ||
5118                         (attr->ingress && mtr_policy->ingress)))
5119                         return rte_flow_error_set(error, EINVAL,
5120                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5121                                           "Flow attributes domain "
5122                                           "has a conflict with current "
5123                                           "meter domain attributes");
5124                 if (attr->transfer && mtr_policy->dev) {
5125                         /*
5126                          * When the policy has a port_id fate action,
5127                          * the flow should have the same src port as the policy.
5128                          */
5129                         struct mlx5_priv *policy_port_priv =
5130                                         mtr_policy->dev->data->dev_private;
5131                         int32_t flow_src_port = priv->representor_id;
5132
5133                         if (port_id_item) {
5134                                 const struct rte_flow_item_port_id *spec =
5135                                                         port_id_item->spec;
5136                                 struct mlx5_priv *port_priv =
5137                                         mlx5_port_to_eswitch_info(spec->id,
5138                                                                   false);
5139                                 if (!port_priv)
5140                                         return rte_flow_error_set(error,
5141                                                 rte_errno,
5142                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5143                                                 spec,
5144                                                 "Failed to get port info.");
5145                                 flow_src_port = port_priv->representor_id;
5146                         }
5147                         if (flow_src_port != policy_port_priv->representor_id)
5148                                 return rte_flow_error_set(error,
5149                                                 rte_errno,
5150                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5151                                                 NULL,
5152                                                 "Flow and meter policy "
5153                                                 "have different src port.");
5154                 }
5155                 *def_policy = false;
5156         }
5157         return 0;
5158 }
5159
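/*
 * Minimal sketch, assuming a meter with id 1 was created beforehand via
 * the rte_mtr API; with a non-default policy the flow attributes must
 * match one of the policy domains checked above.
 *
 *   struct rte_flow_action_meter meter = { .mtr_id = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
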
5160 /**
5161  * Validate the age action.
5162  *
5163  * @param[in] action_flags
5164  *   Holds the actions detected until now.
5165  * @param[in] action
5166  *   Pointer to the age action.
5167  * @param[in] dev
5168  *   Pointer to the Ethernet device structure.
5169  * @param[out] error
5170  *   Pointer to error structure.
5171  *
5172  * @return
5173  *   0 on success, a negative errno value otherwise and rte_errno is set.
5174  */
5175 static int
5176 flow_dv_validate_action_age(uint64_t action_flags,
5177                             const struct rte_flow_action *action,
5178                             struct rte_eth_dev *dev,
5179                             struct rte_flow_error *error)
5180 {
5181         struct mlx5_priv *priv = dev->data->dev_private;
5182         const struct rte_flow_action_age *age = action->conf;
5183
5184         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5185             !priv->sh->aso_age_mng))
5186                 return rte_flow_error_set(error, ENOTSUP,
5187                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5188                                           NULL,
5189                                           "age action not supported");
5190         if (!(action->conf))
5191                 return rte_flow_error_set(error, EINVAL,
5192                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5193                                           "configuration cannot be null");
5194         if (!(age->timeout))
5195                 return rte_flow_error_set(error, EINVAL,
5196                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5197                                           "invalid timeout value 0");
5198         if (action_flags & MLX5_FLOW_ACTION_AGE)
5199                 return rte_flow_error_set(error, EINVAL,
5200                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5201                                           "duplicate age actions set");
5202         return 0;
5203 }
5204
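/*
 * Minimal sketch of an age action accepted by the validation above: a
 * non-zero timeout (in seconds per the rte_flow API; 10 is a placeholder)
 * and at most one AGE action per flow.
 *
 *   struct rte_flow_action_age age = { .timeout = 10 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
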
5205 /**
5206  * Validate the modify-header IPv4 DSCP actions.
5207  *
5208  * @param[in] action_flags
5209  *   Holds the actions detected until now.
5210  * @param[in] action
5211  *   Pointer to the modify action.
5212  * @param[in] item_flags
5213  *   Holds the items detected.
5214  * @param[out] error
5215  *   Pointer to error structure.
5216  *
5217  * @return
5218  *   0 on success, a negative errno value otherwise and rte_errno is set.
5219  */
5220 static int
5221 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5222                                          const struct rte_flow_action *action,
5223                                          const uint64_t item_flags,
5224                                          struct rte_flow_error *error)
5225 {
5226         int ret = 0;
5227
5228         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5229         if (!ret) {
5230                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5231                         return rte_flow_error_set(error, EINVAL,
5232                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5233                                                   NULL,
5234                                                   "no ipv4 item in pattern");
5235         }
5236         return ret;
5237 }
5238
5239 /**
5240  * Validate the modify-header IPv6 DSCP actions.
5241  *
5242  * @param[in] action_flags
5243  *   Holds the actions detected until now.
5244  * @param[in] action
5245  *   Pointer to the modify action.
5246  * @param[in] item_flags
5247  *   Holds the items detected.
5248  * @param[out] error
5249  *   Pointer to error structure.
5250  *
5251  * @return
5252  *   0 on success, a negative errno value otherwise and rte_errno is set.
5253  */
5254 static int
5255 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5256                                          const struct rte_flow_action *action,
5257                                          const uint64_t item_flags,
5258                                          struct rte_flow_error *error)
5259 {
5260         int ret = 0;
5261
5262         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5263         if (!ret) {
5264                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5265                         return rte_flow_error_set(error, EINVAL,
5266                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5267                                                   NULL,
5268                                                   "no ipv6 item in pattern");
5269         }
5270         return ret;
5271 }
5272
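/*
 * Minimal sketch: both DSCP validators above require the matching L3 item
 * in the pattern, e.g. setting IPv4 DSCP (the 0x12 value is a placeholder):
 *
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_set_dscp dscp = { .dscp = 0x12 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP, .conf = &dscp },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
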
5273 /**
5274  * Match modify-header resource.
5275  *
5276  * @param list
5277  *   Pointer to the hash list.
5278  * @param entry
5279  *   Pointer to the existing resource entry object.
5280  * @param key
5281  *   Key of the new entry.
5282  * @param cb_ctx
5283  *   Pointer to new modify-header resource.
5284  *
5285  * @return
5286  *   0 on matching, non-zero otherwise.
5287  */
5288 int
5289 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5290                         struct mlx5_hlist_entry *entry,
5291                         uint64_t key __rte_unused, void *cb_ctx)
5292 {
5293         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5294         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5295         struct mlx5_flow_dv_modify_hdr_resource *resource =
5296                         container_of(entry, typeof(*resource), entry);
5297         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5298
5299         key_len += ref->actions_num * sizeof(ref->actions[0]);
5300         return ref->actions_num != resource->actions_num ||
5301                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5302 }
5303
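/**
 * Allocate and create a modify-header resource (hash list create callback).
 *
 * @param list
 *   Pointer to the hash list.
 * @param key
 *   Key of the new entry (unused).
 * @param cb_ctx
 *   Pointer to the flow callback context holding the reference resource.
 *
 * @return
 *   Pointer to the created entry on success, NULL otherwise and the error
 *   is set in the context.
 */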
5304 struct mlx5_hlist_entry *
5305 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5306                          void *cb_ctx)
5307 {
5308         struct mlx5_dev_ctx_shared *sh = list->ctx;
5309         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5310         struct mlx5dv_dr_domain *ns;
5311         struct mlx5_flow_dv_modify_hdr_resource *entry;
5312         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5313         int ret;
5314         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5315         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5316
5317         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5318                             SOCKET_ID_ANY);
5319         if (!entry) {
5320                 rte_flow_error_set(ctx->error, ENOMEM,
5321                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5322                                    "cannot allocate resource memory");
5323                 return NULL;
5324         }
5325         rte_memcpy(&entry->ft_type,
5326                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5327                    key_len + data_len);
5328         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5329                 ns = sh->fdb_domain;
5330         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5331                 ns = sh->tx_domain;
5332         else
5333                 ns = sh->rx_domain;
5334         ret = mlx5_flow_os_create_flow_action_modify_header
5335                                         (sh->ctx, ns, entry,
5336                                          data_len, &entry->action);
5337         if (ret) {
5338                 mlx5_free(entry);
5339                 rte_flow_error_set(ctx->error, ENOMEM,
5340                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5341                                    NULL, "cannot create modification action");
5342                 return NULL;
5343         }
5344         return &entry->entry;
5345 }
5346
5347 /**
5348  * Validate the sample action.
5349  *
5350  * @param[in, out] action_flags
5351  *   Holds the actions detected until now.
5352  * @param[in] action
5353  *   Pointer to the sample action.
5354  * @param[in] dev
5355  *   Pointer to the Ethernet device structure.
5356  * @param[in] attr
5357  *   Attributes of flow that includes this action.
5358  * @param[in] item_flags
5359  *   Holds the items detected.
5360  * @param[in] rss
5361  *   Pointer to the RSS action.
5362  * @param[out] sample_rss
5363  *   Pointer to the RSS action in sample action list.
5364  * @param[out] count
5365  *   Pointer to the COUNT action in sample action list.
5366  * @param[out] fdb_mirror_limit
5367  *   Pointer to the FDB mirror limitation flag.
5368  * @param[out] error
5369  *   Pointer to error structure.
5370  *
5371  * @return
5372  *   0 on success, a negative errno value otherwise and rte_errno is set.
5373  */
5374 static int
5375 flow_dv_validate_action_sample(uint64_t *action_flags,
5376                                const struct rte_flow_action *action,
5377                                struct rte_eth_dev *dev,
5378                                const struct rte_flow_attr *attr,
5379                                uint64_t item_flags,
5380                                const struct rte_flow_action_rss *rss,
5381                                const struct rte_flow_action_rss **sample_rss,
5382                                const struct rte_flow_action_count **count,
5383                                int *fdb_mirror_limit,
5384                                struct rte_flow_error *error)
5385 {
5386         struct mlx5_priv *priv = dev->data->dev_private;
5387         struct mlx5_dev_config *dev_conf = &priv->config;
5388         const struct rte_flow_action_sample *sample = action->conf;
5389         const struct rte_flow_action *act;
5390         uint64_t sub_action_flags = 0;
5391         uint16_t queue_index = 0xFFFF;
5392         int actions_n = 0;
5393         int ret;
5394
5395         if (!sample)
5396                 return rte_flow_error_set(error, EINVAL,
5397                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5398                                           "configuration cannot be NULL");
5399         if (sample->ratio == 0)
5400                 return rte_flow_error_set(error, EINVAL,
5401                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5402                                           "ratio value starts from 1");
5403         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5404                 return rte_flow_error_set(error, ENOTSUP,
5405                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5406                                           NULL,
5407                                           "sample action not supported");
5408         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5409                 return rte_flow_error_set(error, EINVAL,
5410                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5411                                           "Multiple sample actions not "
5412                                           "supported");
5413         if (*action_flags & MLX5_FLOW_ACTION_METER)
5414                 return rte_flow_error_set(error, EINVAL,
5415                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5416                                           "wrong action order, meter should "
5417                                           "be after sample action");
5418         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5419                 return rte_flow_error_set(error, EINVAL,
5420                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5421                                           "wrong action order, jump should "
5422                                           "be after sample action");
5423         act = sample->actions;
5424         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5425                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5426                         return rte_flow_error_set(error, ENOTSUP,
5427                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5428                                                   act, "too many actions");
5429                 switch (act->type) {
5430                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5431                         ret = mlx5_flow_validate_action_queue(act,
5432                                                               sub_action_flags,
5433                                                               dev,
5434                                                               attr, error);
5435                         if (ret < 0)
5436                                 return ret;
5437                         queue_index = ((const struct rte_flow_action_queue *)
5438                                                         (act->conf))->index;
5439                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5440                         ++actions_n;
5441                         break;
5442                 case RTE_FLOW_ACTION_TYPE_RSS:
5443                         *sample_rss = act->conf;
5444                         ret = mlx5_flow_validate_action_rss(act,
5445                                                             sub_action_flags,
5446                                                             dev, attr,
5447                                                             item_flags,
5448                                                             error);
5449                         if (ret < 0)
5450                                 return ret;
5451                         if (rss && *sample_rss &&
5452                             ((*sample_rss)->level != rss->level ||
5453                             (*sample_rss)->types != rss->types))
5454                                 return rte_flow_error_set(error, ENOTSUP,
5455                                         RTE_FLOW_ERROR_TYPE_ACTION,
5456                                         NULL,
5457                                         "Can't use different RSS types "
5458                                         "or levels in the same flow");
5459                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5460                                 queue_index = (*sample_rss)->queue[0];
5461                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5462                         ++actions_n;
5463                         break;
5464                 case RTE_FLOW_ACTION_TYPE_MARK:
5465                         ret = flow_dv_validate_action_mark(dev, act,
5466                                                            sub_action_flags,
5467                                                            attr, error);
5468                         if (ret < 0)
5469                                 return ret;
5470                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5471                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5472                                                 MLX5_FLOW_ACTION_MARK_EXT;
5473                         else
5474                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5475                         ++actions_n;
5476                         break;
5477                 case RTE_FLOW_ACTION_TYPE_COUNT:
5478                         ret = flow_dv_validate_action_count
5479                                 (dev, is_shared_action_count(act),
5480                                  *action_flags | sub_action_flags,
5481                                  error);
5482                         if (ret < 0)
5483                                 return ret;
5484                         *count = act->conf;
5485                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5486                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5487                         ++actions_n;
5488                         break;
5489                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5490                         ret = flow_dv_validate_action_port_id(dev,
5491                                                               sub_action_flags,
5492                                                               act,
5493                                                               attr,
5494                                                               error);
5495                         if (ret)
5496                                 return ret;
5497                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5498                         ++actions_n;
5499                         break;
5500                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5501                         ret = flow_dv_validate_action_raw_encap_decap
5502                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5503                                  &actions_n, action, item_flags, error);
5504                         if (ret < 0)
5505                                 return ret;
5506                         ++actions_n;
5507                         break;
5508                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5509                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5510                         ret = flow_dv_validate_action_l2_encap(dev,
5511                                                                sub_action_flags,
5512                                                                act, attr,
5513                                                                error);
5514                         if (ret < 0)
5515                                 return ret;
5516                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5517                         ++actions_n;
5518                         break;
5519                 default:
5520                         return rte_flow_error_set(error, ENOTSUP,
5521                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5522                                                   NULL,
5523                                                   "unsupported optional "
5524                                                   "action");
5525                 }
5526         }
5527         if (attr->ingress && !attr->transfer) {
5528                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5529                                           MLX5_FLOW_ACTION_RSS)))
5530                         return rte_flow_error_set(error, EINVAL,
5531                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5532                                                   NULL,
5533                                                   "Ingress must have a dest "
5534                                                   "QUEUE for Sample");
5535         } else if (attr->egress && !attr->transfer) {
5536                 return rte_flow_error_set(error, ENOTSUP,
5537                                           RTE_FLOW_ERROR_TYPE_ACTION,
5538                                           NULL,
5539                                           "Sample only supports Ingress "
5540                                           "or E-Switch");
5541         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5542                 MLX5_ASSERT(attr->transfer);
5543                 if (sample->ratio > 1)
5544                         return rte_flow_error_set(error, ENOTSUP,
5545                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5546                                                   NULL,
5547                                                   "E-Switch doesn't support "
5548                                                   "any optional action "
5549                                                   "for sampling");
5550                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5551                         return rte_flow_error_set(error, ENOTSUP,
5552                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5553                                                   NULL,
5554                                                   "unsupported action QUEUE");
5555                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5556                         return rte_flow_error_set(error, ENOTSUP,
5557                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5558                                                   NULL,
5559                                                   "unsupported action RSS");
5560                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5561                         return rte_flow_error_set(error, EINVAL,
5562                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5563                                                   NULL,
5564                                                   "E-Switch must have a dest "
5565                                                   "port for mirroring");
5566                 if (!priv->config.hca_attr.reg_c_preserve &&
5567                      priv->representor_id != UINT16_MAX)
5568                         *fdb_mirror_limit = 1;
5569         }
5570         /* Continue validation for Xcap actions. */
5571         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5572             (queue_index == 0xFFFF ||
5573              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5574                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5575                      MLX5_FLOW_XCAP_ACTIONS)
5576                         return rte_flow_error_set(error, ENOTSUP,
5577                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5578                                                   NULL, "encap and decap "
5579                                                   "combination isn't "
5580                                                   "supported");
5581                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5582                                                         MLX5_FLOW_ACTION_ENCAP))
5583                         return rte_flow_error_set(error, ENOTSUP,
5584                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5585                                                   NULL, "encap is not supported"
5586                                                   " for ingress traffic");
5587         }
5588         return 0;
5589 }
5590
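/*
 * Minimal sketch of an ingress sample action passing the checks above: a
 * ratio of at least 1 and a QUEUE (or RSS) fate in the sub-action list
 * (queue index 0 and ratio 2 are placeholders):
 *
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action sub_actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_action_sample sample = {
 *           .ratio = 2,
 *           .actions = sub_actions,
 *   };
 */
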
5591 /**
5592  * Find existing modify-header resource or create and register a new one.
5593  *
5594  * @param[in, out] dev
5595  *   Pointer to rte_eth_dev structure.
5596  * @param[in, out] resource
5597  *   Pointer to modify-header resource.
5598  * @param[in, out] dev_flow
5599  *   Pointer to the dev_flow.
5600  * @param[out] error
5601  *   Pointer to error structure.
5602  *
5603  * @return
5604  *   0 on success, a negative errno value otherwise and rte_errno is set.
5605  */
5606 static int
5607 flow_dv_modify_hdr_resource_register
5608                         (struct rte_eth_dev *dev,
5609                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5610                          struct mlx5_flow *dev_flow,
5611                          struct rte_flow_error *error)
5612 {
5613         struct mlx5_priv *priv = dev->data->dev_private;
5614         struct mlx5_dev_ctx_shared *sh = priv->sh;
5615         uint32_t key_len = sizeof(*resource) -
5616                            offsetof(typeof(*resource), ft_type) +
5617                            resource->actions_num * sizeof(resource->actions[0]);
5618         struct mlx5_hlist_entry *entry;
5619         struct mlx5_flow_cb_ctx ctx = {
5620                 .error = error,
5621                 .data = resource,
5622         };
5623         uint64_t key64;
5624
5625         resource->flags = dev_flow->dv.group ? 0 :
5626                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5627         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5628                                     resource->flags))
5629                 return rte_flow_error_set(error, EOVERFLOW,
5630                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5631                                           "too many modify header items");
5632         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5633         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5634         if (!entry)
5635                 return -rte_errno;
5636         resource = container_of(entry, typeof(*resource), entry);
5637         dev_flow->handle->dvh.modify_hdr = resource;
5638         return 0;
5639 }
5640
5641 /**
5642  * Get DV flow counter by index.
5643  *
5644  * @param[in] dev
5645  *   Pointer to the Ethernet device structure.
5646  * @param[in] idx
5647  *   mlx5 flow counter index in the container.
5648  * @param[out] ppool
5649  *   mlx5 flow counter pool in the container.
5650  *
5651  * @return
5652  *   Pointer to the counter, NULL otherwise.
5653  */
5654 static struct mlx5_flow_counter *
5655 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5656                            uint32_t idx,
5657                            struct mlx5_flow_counter_pool **ppool)
5658 {
5659         struct mlx5_priv *priv = dev->data->dev_private;
5660         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5661         struct mlx5_flow_counter_pool *pool;
5662
5663         /* Decrease to original index and clear shared bit. */
5664         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5665         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5666         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5667         MLX5_ASSERT(pool);
5668         if (ppool)
5669                 *ppool = pool;
5670         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5671 }
5672
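/*
 * Illustrative note on the index arithmetic above, assuming
 * MLX5_COUNTERS_PER_POOL is a power of two (e.g. 512): counter indices are
 * 1-based and may carry the shared bit, so decoding works as:
 *
 *   idx    = (counter_idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
 *   pool   = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
 *   offset = idx % MLX5_COUNTERS_PER_POOL;
 */
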
5673 /**
5674  * Check the devx counter belongs to the pool.
5675  *
5676  * @param[in] pool
5677  *   Pointer to the counter pool.
5678  * @param[in] id
5679  *   The counter devx ID.
5680  *
5681  * @return
5682  *   True if counter belongs to the pool, false otherwise.
5683  */
5684 static bool
5685 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5686 {
5687         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5688                    MLX5_COUNTERS_PER_POOL;
5689
5690         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5691                 return true;
5692         return false;
5693 }
5694
5695 /**
5696  * Get a pool by devx counter ID.
5697  *
5698  * @param[in] cmng
5699  *   Pointer to the counter management.
5700  * @param[in] id
5701  *   The counter devx ID.
5702  *
5703  * @return
5704  *   The counter pool pointer if it exists, NULL otherwise.
5705  */
5706 static struct mlx5_flow_counter_pool *
5707 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5708 {
5709         uint32_t i;
5710         struct mlx5_flow_counter_pool *pool = NULL;
5711
5712         rte_spinlock_lock(&cmng->pool_update_sl);
5713         /* Check last used pool. */
5714         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5715             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5716                 pool = cmng->pools[cmng->last_pool_idx];
5717                 goto out;
5718         }
5719         /* ID out of range means no suitable pool in the container. */
5720         if (id > cmng->max_id || id < cmng->min_id)
5721                 goto out;
5722         /*
5723          * Find the pool from the end of the container, since counter IDs
5724          * are mostly sequentially increasing, so the last pool should be
5725          * the needed one.
5726          */
5727         i = cmng->n_valid;
5728         while (i--) {
5729                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5730
5731                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5732                         pool = pool_tmp;
5733                         break;
5734                 }
5735         }
5736 out:
5737         rte_spinlock_unlock(&cmng->pool_update_sl);
5738         return pool;
5739 }
5740
5741 /**
5742  * Resize a counter container.
5743  *
5744  * @param[in] dev
5745  *   Pointer to the Ethernet device structure.
5746  *
5747  * @return
5748  *   0 on success, otherwise negative errno value and rte_errno is set.
5749  */
5750 static int
5751 flow_dv_container_resize(struct rte_eth_dev *dev)
5752 {
5753         struct mlx5_priv *priv = dev->data->dev_private;
5754         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5755         void *old_pools = cmng->pools;
5756         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5757         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5758         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5759
5760         if (!pools) {
5761                 rte_errno = ENOMEM;
5762                 return -ENOMEM;
5763         }
5764         if (old_pools)
5765                 memcpy(pools, old_pools, cmng->n *
5766                                        sizeof(struct mlx5_flow_counter_pool *));
5767         cmng->n = resize;
5768         cmng->pools = pools;
5769         if (old_pools)
5770                 mlx5_free(old_pools);
5771         return 0;
5772 }
5773
5774 /**
5775  * Query a devx flow counter.
5776  *
5777  * @param[in] dev
5778  *   Pointer to the Ethernet device structure.
5779  * @param[in] counter
5780  *   Index to the flow counter.
5781  * @param[out] pkts
5782  *   The statistics value of packets.
5783  * @param[out] bytes
5784  *   The statistics value of bytes.
5785  *
5786  * @return
5787  *   0 on success, otherwise a negative errno value and rte_errno is set.
5788  */
5789 static inline int
5790 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5791                      uint64_t *bytes)
5792 {
5793         struct mlx5_priv *priv = dev->data->dev_private;
5794         struct mlx5_flow_counter_pool *pool = NULL;
5795         struct mlx5_flow_counter *cnt;
5796         int offset;
5797
5798         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5799         MLX5_ASSERT(pool);
5800         if (priv->sh->cmng.counter_fallback)
5801                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5802                                         0, pkts, bytes, 0, NULL, NULL, 0);
5803         rte_spinlock_lock(&pool->sl);
5804         if (!pool->raw) {
5805                 *pkts = 0;
5806                 *bytes = 0;
5807         } else {
5808                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5809                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5810                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5811         }
5812         rte_spinlock_unlock(&pool->sl);
5813         return 0;
5814 }
5815
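/*
 * Illustrative usage sketch, assuming a valid counter index obtained from
 * flow_dv_counter_alloc():
 *
 *   uint64_t pkts, bytes;
 *
 *   if (!_flow_dv_query_count(dev, counter, &pkts, &bytes)) {
 *           pkts and bytes now hold the values from the last batch
 *           query, or from the direct DevX query in fallback mode.
 *   }
 */
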
5816 /**
5817  * Create and initialize a new counter pool.
5818  *
5819  * @param[in] dev
5820  *   Pointer to the Ethernet device structure.
5821  * @param[in] dcs
5822  *   The devX counter handle.
5823  * @param[in] age
5824  *   Whether the pool is for counter that was allocated for aging.
5827  *
5828  * @return
5829  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5830  */
5831 static struct mlx5_flow_counter_pool *
5832 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5833                     uint32_t age)
5834 {
5835         struct mlx5_priv *priv = dev->data->dev_private;
5836         struct mlx5_flow_counter_pool *pool;
5837         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5838         bool fallback = priv->sh->cmng.counter_fallback;
5839         uint32_t size = sizeof(*pool);
5840
5841         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5842         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5843         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5844         if (!pool) {
5845                 rte_errno = ENOMEM;
5846                 return NULL;
5847         }
5848         pool->raw = NULL;
5849         pool->is_aged = !!age;
5850         pool->query_gen = 0;
5851         pool->min_dcs = dcs;
5852         rte_spinlock_init(&pool->sl);
5853         rte_spinlock_init(&pool->csl);
5854         TAILQ_INIT(&pool->counters[0]);
5855         TAILQ_INIT(&pool->counters[1]);
5856         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5857         rte_spinlock_lock(&cmng->pool_update_sl);
5858         pool->index = cmng->n_valid;
5859         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5860                 mlx5_free(pool);
5861                 rte_spinlock_unlock(&cmng->pool_update_sl);
5862                 return NULL;
5863         }
5864         cmng->pools[pool->index] = pool;
5865         cmng->n_valid++;
5866         if (unlikely(fallback)) {
5867                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5868
5869                 if (base < cmng->min_id)
5870                         cmng->min_id = base;
5871                 if (base > cmng->max_id)
5872                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5873                 cmng->last_pool_idx = pool->index;
5874         }
5875         rte_spinlock_unlock(&cmng->pool_update_sl);
5876         return pool;
5877 }
5878
5879 /**
5880  * Prepare a new counter and/or a new counter pool.
5881  *
5882  * @param[in] dev
5883  *   Pointer to the Ethernet device structure.
5884  * @param[out] cnt_free
5885  *   Where to put the pointer of a new counter.
5886  * @param[in] age
5887  *   Whether the pool is for counter that was allocated for aging.
5888  *
5889  * @return
5890  *   The counter pool pointer and @p cnt_free is set on success,
5891  *   NULL otherwise and rte_errno is set.
5892  */
5893 static struct mlx5_flow_counter_pool *
5894 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5895                              struct mlx5_flow_counter **cnt_free,
5896                              uint32_t age)
5897 {
5898         struct mlx5_priv *priv = dev->data->dev_private;
5899         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5900         struct mlx5_flow_counter_pool *pool;
5901         struct mlx5_counters tmp_tq;
5902         struct mlx5_devx_obj *dcs = NULL;
5903         struct mlx5_flow_counter *cnt;
5904         enum mlx5_counter_type cnt_type =
5905                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5906         bool fallback = priv->sh->cmng.counter_fallback;
5907         uint32_t i;
5908
5909         if (fallback) {
5910                 /* bulk_bitmap must be 0 for single counter allocation. */
5911                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5912                 if (!dcs)
5913                         return NULL;
5914                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5915                 if (!pool) {
5916                         pool = flow_dv_pool_create(dev, dcs, age);
5917                         if (!pool) {
5918                                 mlx5_devx_cmd_destroy(dcs);
5919                                 return NULL;
5920                         }
5921                 }
5922                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
5923                 cnt = MLX5_POOL_GET_CNT(pool, i);
5924                 cnt->pool = pool;
5925                 cnt->dcs_when_free = dcs;
5926                 *cnt_free = cnt;
5927                 return pool;
5928         }
5929         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5930         if (!dcs) {
5931                 rte_errno = ENODATA;
5932                 return NULL;
5933         }
5934         pool = flow_dv_pool_create(dev, dcs, age);
5935         if (!pool) {
5936                 mlx5_devx_cmd_destroy(dcs);
5937                 return NULL;
5938         }
5939         TAILQ_INIT(&tmp_tq);
5940         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
5941                 cnt = MLX5_POOL_GET_CNT(pool, i);
5942                 cnt->pool = pool;
5943                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
5944         }
5945         rte_spinlock_lock(&cmng->csl[cnt_type]);
5946         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
5947         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5948         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
5949         (*cnt_free)->pool = pool;
5950         return pool;
5951 }
5952
5953 /**
5954  * Allocate a flow counter.
5955  *
5956  * @param[in] dev
5957  *   Pointer to the Ethernet device structure.
5958  * @param[in] age
5959  *   Whether the counter was allocated for aging.
5960  *
5961  * @return
5962  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5963  */
5964 static uint32_t
5965 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5966 {
5967         struct mlx5_priv *priv = dev->data->dev_private;
5968         struct mlx5_flow_counter_pool *pool = NULL;
5969         struct mlx5_flow_counter *cnt_free = NULL;
5970         bool fallback = priv->sh->cmng.counter_fallback;
5971         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5972         enum mlx5_counter_type cnt_type =
5973                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5974         uint32_t cnt_idx;
5975
5976         if (!priv->config.devx) {
5977                 rte_errno = ENOTSUP;
5978                 return 0;
5979         }
5980         /* Get free counters from container. */
5981         rte_spinlock_lock(&cmng->csl[cnt_type]);
5982         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5983         if (cnt_free)
5984                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5985         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5986         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5987                 goto err;
5988         pool = cnt_free->pool;
5989         if (fallback)
5990                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5991         /* Create a DV counter action only on first-time usage. */
5992         if (!cnt_free->action) {
5993                 uint16_t offset;
5994                 struct mlx5_devx_obj *dcs;
5995                 int ret;
5996
5997                 if (!fallback) {
5998                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5999                         dcs = pool->min_dcs;
6000                 } else {
6001                         offset = 0;
6002                         dcs = cnt_free->dcs_when_free;
6003                 }
6004                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6005                                                             &cnt_free->action);
6006                 if (ret) {
6007                         rte_errno = errno;
6008                         goto err;
6009                 }
6010         }
6011         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6012                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6013         /* Update the counter reset values. */
6014         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6015                                  &cnt_free->bytes))
6016                 goto err;
6017         if (!fallback && !priv->sh->cmng.query_thread_on)
6018                 /* Start the asynchronous batch query by the host thread. */
6019                 mlx5_set_query_alarm(priv->sh);
6020         /*
6021          * When the count action isn't shared (by ID), the shared_info field
6022          * is used for the indirect action API's refcnt.
6023          * When the counter action is shared neither by ID nor by the
6024          * indirect action API, the shared info refcnt must be 1.
6025          */
6026         cnt_free->shared_info.refcnt = 1;
6027         return cnt_idx;
6028 err:
6029         if (cnt_free) {
6030                 cnt_free->pool = pool;
6031                 if (fallback)
6032                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6033                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6034                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6035                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6036         }
6037         return 0;
6038 }
6039
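/*
 * Illustrative usage sketch, assuming DevX is enabled: a counter taken with
 * flow_dv_counter_alloc() must be returned with flow_dv_counter_free()
 * (defined later in this file) once the flow is destroyed:
 *
 *   uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);
 *
 *   if (cnt_idx) {
 *           ... attach the counter action to the flow ...
 *           flow_dv_counter_free(dev, cnt_idx);
 *   }
 */
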
6040 /**
6041  * Allocate a shared flow counter.
6042  *
6043  * @param[in] ctx
6044  *   Pointer to the shared counter configuration.
6045  * @param[out] data
6046  *   Pointer to save the allocated counter index.
6047  *
6048  * @return
6049  *   0 on success, a negative errno value otherwise.
6050  */
6052 static int32_t
6053 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
6054 {
6055         struct mlx5_shared_counter_conf *conf = ctx;
6056         struct rte_eth_dev *dev = conf->dev;
6057         struct mlx5_flow_counter *cnt;
6058
6059         data->dword = flow_dv_counter_alloc(dev, 0);
6060         data->dword |= MLX5_CNT_SHARED_OFFSET;
6061         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6062         cnt->shared_info.id = conf->id;
6063         return 0;
6064 }
6065
6066 /**
6067  * Get a shared flow counter.
6068  *
6069  * @param[in] dev
6070  *   Pointer to the Ethernet device structure.
6071  * @param[in] id
6072  *   Counter identifier.
6073  *
6074  * @return
6075  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6076  */
6077 static uint32_t
6078 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6079 {
6080         struct mlx5_priv *priv = dev->data->dev_private;
6081         struct mlx5_shared_counter_conf conf = {
6082                 .dev = dev,
6083                 .id = id,
6084         };
6085         union mlx5_l3t_data data = {
6086                 .dword = 0,
6087         };
6088
6089         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6090                                flow_dv_counter_alloc_shared_cb, &conf);
6091         return data.dword;
6092 }
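
/*
 * Usage sketch (illustrative only): the lookup above is a
 * "get or create" - mlx5_l3t_prepare_entry() either returns the
 * existing entry for the ID or invokes
 * flow_dv_counter_alloc_shared_cb() to allocate one, so two calls
 * with the same ID are expected to yield the same index:
 *
 *     uint32_t a = flow_dv_counter_get_shared(dev, id);
 *     uint32_t b = flow_dv_counter_get_shared(dev, id);
 *     MLX5_ASSERT(a == b);
 */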
6093
6094 /**
6095  * Get age param from counter index.
6096  *
6097  * @param[in] dev
6098  *   Pointer to the Ethernet device structure.
6099  * @param[in] counter
6100  *   Index to the counter handler.
6101  *
6102  * @return
6103  *   The aging parameter specified for the counter index.
6104  */
6105 static struct mlx5_age_param*
6106 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6107                                 uint32_t counter)
6108 {
6109         struct mlx5_flow_counter *cnt;
6110         struct mlx5_flow_counter_pool *pool = NULL;
6111
6112         flow_dv_counter_get_by_idx(dev, counter, &pool);
6113         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6114         cnt = MLX5_POOL_GET_CNT(pool, counter);
6115         return MLX5_CNT_TO_AGE(cnt);
6116 }
6117
6118 /**
6119  * Remove a flow counter from the aged counter list.
6120  *
6121  * @param[in] dev
6122  *   Pointer to the Ethernet device structure.
6123  * @param[in] counter
6124  *   Index to the counter handler.
6125  * @param[in] cnt
6126  *   Pointer to the counter handler.
6127  */
6128 static void
6129 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6130                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6131 {
6132         struct mlx5_age_info *age_info;
6133         struct mlx5_age_param *age_param;
6134         struct mlx5_priv *priv = dev->data->dev_private;
6135         uint16_t expected = AGE_CANDIDATE;
6136
6137         age_info = GET_PORT_AGE_INFO(priv);
6138         age_param = flow_dv_counter_idx_get_age(dev, counter);
6139         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6140                                          AGE_FREE, false, __ATOMIC_RELAXED,
6141                                          __ATOMIC_RELAXED)) {
6142                 /*
6143                  * The lock is needed even on age timeout, since the
6144                  * counter may still be under processing.
6145                  */
6146                 rte_spinlock_lock(&age_info->aged_sl);
6147                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6148                 rte_spinlock_unlock(&age_info->aged_sl);
6149                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6150         }
6151 }
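
/*
 * Illustrative sketch of the state transition above: the fast path is
 * a single lock-free compare-exchange AGE_CANDIDATE -> AGE_FREE; the
 * spinlock is taken only when the counter has already aged out and
 * sits on the aged_counters list:
 *
 *     uint16_t expected = AGE_CANDIDATE;
 *     if (__atomic_compare_exchange_n(&state, &expected, AGE_FREE,
 *                                     false, __ATOMIC_RELAXED,
 *                                     __ATOMIC_RELAXED))
 *             return;  // fast path, no lock taken
 *     // slow path: unlink from aged_counters under aged_sl,
 *     // then store AGE_FREE.
 */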
6152
6153 /**
6154  * Release a flow counter.
6155  *
6156  * @param[in] dev
6157  *   Pointer to the Ethernet device structure.
6158  * @param[in] counter
6159  *   Index to the counter handler.
6160  */
6161 static void
6162 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6163 {
6164         struct mlx5_priv *priv = dev->data->dev_private;
6165         struct mlx5_flow_counter_pool *pool = NULL;
6166         struct mlx5_flow_counter *cnt;
6167         enum mlx5_counter_type cnt_type;
6168
6169         if (!counter)
6170                 return;
6171         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6172         MLX5_ASSERT(pool);
6173         if (pool->is_aged) {
6174                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6175         } else {
6176                 /*
6177                  * If the counter action is shared by ID, the l3t_clear_entry
6178                  * function decrements its reference counter. If after the
6179                  * decrement the action is still referenced, the function
6180                  * returns here and does not release it.
6181                  */
6182                 if (IS_LEGACY_SHARED_CNT(counter) &&
6183                     mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
6184                                          cnt->shared_info.id))
6185                         return;
6186                 /*
6187                  * If the counter action is shared by the indirect action API,
6188                  * the atomic function decrements its reference counter.
6189                  * If after the decrement the action is still referenced, the
6190                  * function returns here and does not release it.
6191                  * When the counter action is shared neither by ID nor by the
6192                  * indirect action API, the refcnt is 1 before the decrement,
6193                  * so the condition fails and the function doesn't return here.
6194                  */
6195                 if (!IS_LEGACY_SHARED_CNT(counter) &&
6196                     __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6197                                        __ATOMIC_RELAXED))
6198                         return;
6199         }
6200         cnt->pool = pool;
6201         /*
6202          * Put the counter back to the list to be updated, in non-fallback
6203          * mode. Currently, two lists are used alternately: while one is
6204          * being queried, the freed counter is added to the other one,
6205          * selected by the pool query_gen value. After a query finishes,
6206          * that counter list is appended to the global container counter
6207          * list. The lists are switched when a query starts, so the query
6208          * callback and this release function operate on different lists.
6209          */
6210         if (!priv->sh->cmng.counter_fallback) {
6211                 rte_spinlock_lock(&pool->csl);
6212                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6213                 rte_spinlock_unlock(&pool->csl);
6214         } else {
6215                 cnt->dcs_when_free = cnt->dcs_when_active;
6216                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6217                                            MLX5_COUNTER_TYPE_ORIGIN;
6218                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6219                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6220                                   cnt, next);
6221                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6222         }
6223 }
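
/*
 * Pairing sketch (illustrative): a counter index obtained from
 * flow_dv_counter_alloc() is released with this function; index 0 is
 * a no-op, so error paths may free unconditionally. The allocator's
 * second argument is assumed here to request counter aging when
 * non-zero:
 *
 *     uint32_t cnt = flow_dv_counter_alloc(dev, 0);
 *     if (cnt) {
 *             ... attach the count action to a flow ...
 *     }
 *     flow_dv_counter_free(dev, cnt);
 */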
6224
6225 /**
6226  * Resize a meter ID container.
6227  *
6228  * @param[in] dev
6229  *   Pointer to the Ethernet device structure.
6230  *
6231  * @return
6232  *   0 on success, otherwise negative errno value and rte_errno is set.
6233  */
6234 static int
6235 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6236 {
6237         struct mlx5_priv *priv = dev->data->dev_private;
6238         struct mlx5_aso_mtr_pools_mng *pools_mng =
6239                                 &priv->sh->mtrmng->pools_mng;
6240         void *old_pools = pools_mng->pools;
6241         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6242         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6243         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6244
6245         if (!pools) {
6246                 rte_errno = ENOMEM;
6247                 return -ENOMEM;
6248         }
6249         if (!pools_mng->n)
6250                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6251                         mlx5_free(pools);
6252                         return -ENOMEM;
6253                 }
6254         if (old_pools)
6255                 memcpy(pools, old_pools, pools_mng->n *
6256                                        sizeof(struct mlx5_aso_mtr_pool *));
6257         pools_mng->n = resize;
6258         pools_mng->pools = pools;
6259         if (old_pools)
6260                 mlx5_free(old_pools);
6261         return 0;
6262 }
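
/*
 * Illustrative sketch of the resize pattern above: only the table of
 * pool pointers grows (by MLX5_MTRS_CONTAINER_RESIZE entries); the
 * pools themselves never move:
 *
 *     new_n   = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
 *     new_tbl = zeroed allocation of new_n pointers;
 *     memcpy(new_tbl, old_tbl, pools_mng->n * sizeof(*new_tbl));
 *     free(old_tbl);
 */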
6263
6264 /**
6265  * Prepare a new meter and/or a new meter pool.
6266  *
6267  * @param[in] dev
6268  *   Pointer to the Ethernet device structure.
6269  * @param[out] mtr_free
6270  *   Where to put the pointer to a new meter.
6271  *
6272  * @return
6273  *   The meter pool pointer on success, with @p mtr_free set;
6274  *   NULL otherwise and rte_errno is set.
6275  */
6276 static struct mlx5_aso_mtr_pool *
6277 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6278                              struct mlx5_aso_mtr **mtr_free)
6279 {
6280         struct mlx5_priv *priv = dev->data->dev_private;
6281         struct mlx5_aso_mtr_pools_mng *pools_mng =
6282                                 &priv->sh->mtrmng->pools_mng;
6283         struct mlx5_aso_mtr_pool *pool = NULL;
6284         struct mlx5_devx_obj *dcs = NULL;
6285         uint32_t i;
6286         uint32_t log_obj_size;
6287
6288         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6289         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6290                         priv->sh->pdn, log_obj_size);
6291         if (!dcs) {
6292                 rte_errno = ENODATA;
6293                 return NULL;
6294         }
6295         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6296         if (!pool) {
6297                 rte_errno = ENOMEM;
6298                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6299                 return NULL;
6300         }
6301         pool->devx_obj = dcs;
6302         pool->index = pools_mng->n_valid;
6303         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6304                 mlx5_free(pool);
6305                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6306                 return NULL;
6307         }
6308         pools_mng->pools[pool->index] = pool;
6309         pools_mng->n_valid++;
6310         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6311                 pool->mtrs[i].offset = i;
6312                 LIST_INSERT_HEAD(&pools_mng->meters,
6313                                  &pool->mtrs[i], next);
6314         }
6315         pool->mtrs[0].offset = 0;
6316         *mtr_free = &pool->mtrs[0];
6317         return pool;
6318 }
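
/*
 * Illustrative note on the loop above: meters 1..MLX5_ASO_MTRS_PER_POOL-1
 * of the new pool are pushed onto the pools_mng->meters free list,
 * while meter 0 is handed back directly through *mtr_free, so the
 * caller gets one usable meter without touching the free list:
 *
 *     struct mlx5_aso_mtr *mtr;
 *     struct mlx5_aso_mtr_pool *pool = flow_dv_mtr_pool_create(dev, &mtr);
 *     if (pool)
 *             ... mtr points at pool->mtrs[0] ...
 */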
6319
6320 /**
6321  * Release a flow meter back into its pool.
6322  *
6323  * @param[in] dev
6324  *   Pointer to the Ethernet device structure.
6325  * @param[in] mtr_idx
6326  *   Index to the ASO flow meter.
6327  */
6328 static void
6329 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6330 {
6331         struct mlx5_priv *priv = dev->data->dev_private;
6332         struct mlx5_aso_mtr_pools_mng *pools_mng =
6333                                 &priv->sh->mtrmng->pools_mng;
6334         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6335
6336         MLX5_ASSERT(aso_mtr);
6337         rte_spinlock_lock(&pools_mng->mtrsl);
6338         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6339         aso_mtr->state = ASO_METER_FREE;
6340         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6341         rte_spinlock_unlock(&pools_mng->mtrsl);
6342 }
6343
6344 /**
6345  * Allocate an ASO flow meter.
6346  *
6347  * @param[in] dev
6348  *   Pointer to the Ethernet device structure.
6349  *
6350  * @return
6351  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6352  */
6353 static uint32_t
6354 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6355 {
6356         struct mlx5_priv *priv = dev->data->dev_private;
6357         struct mlx5_aso_mtr *mtr_free = NULL;
6358         struct mlx5_aso_mtr_pools_mng *pools_mng =
6359                                 &priv->sh->mtrmng->pools_mng;
6360         struct mlx5_aso_mtr_pool *pool;
6361         uint32_t mtr_idx = 0;
6362
6363         if (!priv->config.devx) {
6364                 rte_errno = ENOTSUP;
6365                 return 0;
6366         }
6367         /* Get a free meter from the pools management free list. */
6369         rte_spinlock_lock(&pools_mng->mtrsl);
6370         mtr_free = LIST_FIRST(&pools_mng->meters);
6371         if (mtr_free)
6372                 LIST_REMOVE(mtr_free, next);
6373         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6374                 rte_spinlock_unlock(&pools_mng->mtrsl);
6375                 return 0;
6376         }
6377         mtr_free->state = ASO_METER_WAIT;
6378         rte_spinlock_unlock(&pools_mng->mtrsl);
6379         pool = container_of(mtr_free,
6380                         struct mlx5_aso_mtr_pool,
6381                         mtrs[mtr_free->offset]);
6382         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6383         if (!mtr_free->fm.meter_action) {
6384 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6385                 struct rte_flow_error error;
6386                 uint8_t reg_id;
6387
6388                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6389                 mtr_free->fm.meter_action =
6390                         mlx5_glue->dv_create_flow_action_aso
6391                                                 (priv->sh->rx_domain,
6392                                                  pool->devx_obj->obj,
6393                                                  mtr_free->offset,
6394                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6395                                                  reg_id - REG_C_0);
6396 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6397                 if (!mtr_free->fm.meter_action) {
6398                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6399                         return 0;
6400                 }
6401         }
6402         return mtr_idx;
6403 }
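
/*
 * Pairing sketch (illustrative): a meter index obtained here is
 * returned to its pool with flow_dv_aso_mtr_release_to_pool():
 *
 *     uint32_t mtr_idx = flow_dv_mtr_alloc(dev);
 *     if (!mtr_idx)
 *             return -rte_errno;  // e.g. ENOTSUP without DevX
 *     ...
 *     flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
 */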
6404
6405 /**
6406  * Verify the @p attributes will be correctly understood by the NIC and are
6407  * valid in the current device configuration.
6408  *
6409  * @param[in] dev
6410  *   Pointer to dev struct.
6411  * @param[in] tunnel
6412  *   Pointer to the tunnel offload context, or NULL.
6413  * @param[in] attributes
6414  *   Pointer to flow attributes.
6415  * @param[in] grp_info
6416  *   Pointer to the flow group translation attributes.
6415  * @param[out] error
6416  *   Pointer to error structure.
6417  *
6418  * @return
6419  *   - 0 on success for a non-root table.
6420  *   - 1 on success for a root table.
6421  *   - a negative errno value otherwise and rte_errno is set.
6422  */
6423 static int
6424 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6425                             const struct mlx5_flow_tunnel *tunnel,
6426                             const struct rte_flow_attr *attributes,
6427                             const struct flow_grp_info *grp_info,
6428                             struct rte_flow_error *error)
6429 {
6430         struct mlx5_priv *priv = dev->data->dev_private;
6431         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6432         int ret = 0;
6433
6434 #ifndef HAVE_MLX5DV_DR
6435         RTE_SET_USED(tunnel);
6436         RTE_SET_USED(grp_info);
6437         if (attributes->group)
6438                 return rte_flow_error_set(error, ENOTSUP,
6439                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6440                                           NULL,
6441                                           "groups are not supported");
6442 #else
6443         uint32_t table = 0;
6444
6445         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6446                                        grp_info, error);
6447         if (ret)
6448                 return ret;
6449         if (!table)
6450                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6451 #endif
6452         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6453             attributes->priority > lowest_priority)
6454                 return rte_flow_error_set(error, ENOTSUP,
6455                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6456                                           NULL,
6457                                           "priority out of range");
6458         if (attributes->transfer) {
6459                 if (!priv->config.dv_esw_en)
6460                         return rte_flow_error_set
6461                                 (error, ENOTSUP,
6462                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6463                                  "E-Switch dr is not supported");
6464                 if (!(priv->representor || priv->master))
6465                         return rte_flow_error_set
6466                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6467                                  NULL, "E-Switch configuration can only be"
6468                                  " done by a master or a representor device");
6469                 if (attributes->egress)
6470                         return rte_flow_error_set
6471                                 (error, ENOTSUP,
6472                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6473                                  "egress is not supported");
6474         }
6475         if (!(attributes->egress ^ attributes->ingress))
6476                 return rte_flow_error_set(error, ENOTSUP,
6477                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6478                                           "must specify exactly one of "
6479                                           "ingress or egress");
6480         return ret;
6481 }
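
/*
 * Illustrative example (not from the source) of attributes accepted by
 * the checks above: exactly one of ingress/egress must be set, the
 * priority must be within the device range, and transfer rules further
 * require E-Switch support on a master or representor port:
 *
 *     struct rte_flow_attr attr = {
 *             .group = 1,
 *             .priority = 0,
 *             .ingress = 1,
 *     };
 */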
6482
6483 static uint16_t
6484 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6485                           const struct rte_flow_item *end)
6486 {
6487         const struct rte_flow_item *item = *head;
6488         uint16_t l3_protocol;
6489
6490         for (; item != end; item++) {
6491                 switch (item->type) {
6492                 default:
6493                         break;
6494                 case RTE_FLOW_ITEM_TYPE_IPV4:
6495                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6496                         goto l3_ok;
6497                 case RTE_FLOW_ITEM_TYPE_IPV6:
6498                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6499                         goto l3_ok;
6500                 case RTE_FLOW_ITEM_TYPE_ETH:
6501                         if (item->mask && item->spec) {
6502                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6503                                                             type, item,
6504                                                             l3_protocol);
6505                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6506                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6507                                         goto l3_ok;
6508                         }
6509                         break;
6510                 case RTE_FLOW_ITEM_TYPE_VLAN:
6511                         if (item->mask && item->spec) {
6512                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6513                                                             inner_type, item,
6514                                                             l3_protocol);
6515                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6516                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6517                                         goto l3_ok;
6518                         }
6519                         break;
6520                 }
6521         }
6522         return 0;
6523 l3_ok:
6524         *head = item;
6525         return l3_protocol;
6526 }
6527
6528 static uint8_t
6529 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6530                           const struct rte_flow_item *end)
6531 {
6532         const struct rte_flow_item *item = *head;
6533         uint8_t l4_protocol;
6534
6535         for (; item != end; item++) {
6536                 switch (item->type) {
6537                 default:
6538                         break;
6539                 case RTE_FLOW_ITEM_TYPE_TCP:
6540                         l4_protocol = IPPROTO_TCP;
6541                         goto l4_ok;
6542                 case RTE_FLOW_ITEM_TYPE_UDP:
6543                         l4_protocol = IPPROTO_UDP;
6544                         goto l4_ok;
6545                 case RTE_FLOW_ITEM_TYPE_IPV4:
6546                         if (item->mask && item->spec) {
6547                                 const struct rte_flow_item_ipv4 *mask, *spec;
6548
6549                                 mask = (typeof(mask))item->mask;
6550                                 spec = (typeof(spec))item->spec;
6551                                 l4_protocol = mask->hdr.next_proto_id &
6552                                               spec->hdr.next_proto_id;
6553                                 if (l4_protocol == IPPROTO_TCP ||
6554                                     l4_protocol == IPPROTO_UDP)
6555                                         goto l4_ok;
6556                         }
6557                         break;
6558                 case RTE_FLOW_ITEM_TYPE_IPV6:
6559                         if (item->mask && item->spec) {
6560                                 const struct rte_flow_item_ipv6 *mask, *spec;
6561                                 mask = (typeof(mask))item->mask;
6562                                 spec = (typeof(spec))item->spec;
6563                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6564                                 if (l4_protocol == IPPROTO_TCP ||
6565                                     l4_protocol == IPPROTO_UDP)
6566                                         goto l4_ok;
6567                         }
6568                         break;
6569                 }
6570         }
6571         return 0;
6572 l4_ok:
6573         *head = item;
6574         return l4_protocol;
6575 }
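
/*
 * Usage sketch (illustrative): both locate helpers scan forward from
 * *head and, on a match, advance *head to the matching item, so they
 * can be chained to find the L3 and then the L4 protocol of a single
 * header stack:
 *
 *     const struct rte_flow_item *cur = pattern;
 *     uint16_t l3 = mlx5_flow_locate_proto_l3(&cur, end);
 *     uint8_t l4 = mlx5_flow_locate_proto_l4(&cur, end);
 */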
6576
6577 static int
6578 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6579                                 const struct rte_flow_item *rule_items,
6580                                 const struct rte_flow_item *integrity_item,
6581                                 struct rte_flow_error *error)
6582 {
6583         struct mlx5_priv *priv = dev->data->dev_private;
6584         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6585         const struct rte_flow_item_integrity *mask = (typeof(mask))
6586                                                      integrity_item->mask;
6587         const struct rte_flow_item_integrity *spec = (typeof(spec))
6588                                                      integrity_item->spec;
6589         uint32_t protocol;
6590
6591         if (!priv->config.hca_attr.pkt_integrity_match)
6592                 return rte_flow_error_set(error, ENOTSUP,
6593                                           RTE_FLOW_ERROR_TYPE_ITEM,
6594                                           integrity_item,
6595                                           "packet integrity item not supported");
6596         if (!mask)
6597                 mask = &rte_flow_item_integrity_mask;
6598         if (!mlx5_validate_integrity_item(mask))
6599                 return rte_flow_error_set(error, ENOTSUP,
6600                                           RTE_FLOW_ERROR_TYPE_ITEM,
6601                                           integrity_item,
6602                                           "unsupported integrity filter");
6603         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
6604         if (spec != NULL && spec->level > 1) {
6605                 if (!tunnel_item)
6606                         return rte_flow_error_set(error, ENOTSUP,
6607                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6608                                                   integrity_item,
6609                                                   "missing tunnel item");
6610                 item = tunnel_item;
6611                 end_item = mlx5_find_end_item(tunnel_item);
6612         } else {
6613                 end_item = tunnel_item ? tunnel_item :
6614                            mlx5_find_end_item(integrity_item);
6615         }
6616         if (mask->l3_ok || mask->ipv4_csum_ok) {
6617                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6618                 if (!protocol)
6619                         return rte_flow_error_set(error, EINVAL,
6620                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6621                                                   integrity_item,
6622                                                   "missing L3 protocol");
6623         }
6624         if (mask->l4_ok || mask->l4_csum_ok) {
6625                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6626                 if (!protocol)
6627                         return rte_flow_error_set(error, EINVAL,
6628                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6629                                                   integrity_item,
6630                                                   "missing L4 protocol");
6631         }
6632         return 0;
6633 }
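
/*
 * Illustrative item (not from the source) that the validation above
 * would accept, provided the pattern also carries the matched L3/L4
 * headers:
 *
 *     struct rte_flow_item_integrity spec = {
 *             .level = 0,  // match outer headers
 *             .l3_ok = 1,
 *             .l4_ok = 1,
 *     };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
 *             .spec = &spec,
 *             .mask = &spec,
 *     };
 */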
6634
6635 /**
6636  * Internal validation function for validating both actions and items.
6637  *
6638  * @param[in] dev
6639  *   Pointer to the rte_eth_dev structure.
6640  * @param[in] attr
6641  *   Pointer to the flow attributes.
6642  * @param[in] items
6643  *   Pointer to the list of items.
6644  * @param[in] actions
6645  *   Pointer to the list of actions.
6646  * @param[in] external
6647  *   This flow rule is created by a request external to the PMD.
6648  * @param[in] hairpin
6649  *   Number of hairpin TX actions, 0 means classic flow.
6650  * @param[out] error
6651  *   Pointer to the error structure.
6652  *
6653  * @return
6654  *   0 on success, a negative errno value otherwise and rte_errno is set.
6655  */
6656 static int
6657 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6658                  const struct rte_flow_item items[],
6659                  const struct rte_flow_action actions[],
6660                  bool external, int hairpin, struct rte_flow_error *error)
6661 {
6662         int ret;
6663         uint64_t action_flags = 0;
6664         uint64_t item_flags = 0;
6665         uint64_t last_item = 0;
6666         uint8_t next_protocol = 0xff;
6667         uint16_t ether_type = 0;
6668         int actions_n = 0;
6669         uint8_t item_ipv6_proto = 0;
6670         int fdb_mirror_limit = 0;
6671         int modify_after_mirror = 0;
6672         const struct rte_flow_item *geneve_item = NULL;
6673         const struct rte_flow_item *gre_item = NULL;
6674         const struct rte_flow_item *gtp_item = NULL;
6675         const struct rte_flow_action_raw_decap *decap;
6676         const struct rte_flow_action_raw_encap *encap;
6677         const struct rte_flow_action_rss *rss = NULL;
6678         const struct rte_flow_action_rss *sample_rss = NULL;
6679         const struct rte_flow_action_count *sample_count = NULL;
6680         const struct rte_flow_item_tcp nic_tcp_mask = {
6681                 .hdr = {
6682                         .tcp_flags = 0xFF,
6683                         .src_port = RTE_BE16(UINT16_MAX),
6684                         .dst_port = RTE_BE16(UINT16_MAX),
6685                 }
6686         };
6687         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6688                 .hdr = {
6689                         .src_addr =
6690                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6691                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6692                         .dst_addr =
6693                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6694                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6695                         .vtc_flow = RTE_BE32(0xffffffff),
6696                         .proto = 0xff,
6697                         .hop_limits = 0xff,
6698                 },
6699                 .has_frag_ext = 1,
6700         };
6701         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6702                 .hdr = {
6703                         .common = {
6704                                 .u32 =
6705                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6706                                         .type = 0xFF,
6707                                         }).u32),
6708                         },
6709                         .dummy[0] = 0xffffffff,
6710                 },
6711         };
6712         struct mlx5_priv *priv = dev->data->dev_private;
6713         struct mlx5_dev_config *dev_conf = &priv->config;
6714         uint16_t queue_index = 0xFFFF;
6715         const struct rte_flow_item_vlan *vlan_m = NULL;
6716         uint32_t rw_act_num = 0;
6717         uint64_t is_root;
6718         const struct mlx5_flow_tunnel *tunnel;
6719         enum mlx5_tof_rule_type tof_rule_type;
6720         struct flow_grp_info grp_info = {
6721                 .external = !!external,
6722                 .transfer = !!attr->transfer,
6723                 .fdb_def_rule = !!priv->fdb_def_rule,
6724                 .std_tbl_fix = true,
6725         };
6726         const struct rte_eth_hairpin_conf *conf;
6727         const struct rte_flow_item *rule_items = items;
6728         const struct rte_flow_item *port_id_item = NULL;
6729         bool def_policy = false;
6730
6731         if (items == NULL)
6732                 return -1;
6733         tunnel = is_tunnel_offload_active(dev) ?
6734                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6735         if (tunnel) {
6736                 if (priv->representor)
6737                         return rte_flow_error_set
6738                                 (error, ENOTSUP,
6739                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6740                                  NULL, "decap not supported for VF representor");
6741                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6742                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6743                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6744                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6745                                         MLX5_FLOW_ACTION_DECAP;
6746                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6747                                         (dev, attr, tunnel, tof_rule_type);
6748         }
6749         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6750         if (ret < 0)
6751                 return ret;
6752         is_root = (uint64_t)ret;
6753         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6754                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6755                 int type = items->type;
6756
6757                 if (!mlx5_flow_os_item_supported(type))
6758                         return rte_flow_error_set(error, ENOTSUP,
6759                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6760                                                   NULL, "item not supported");
6761                 switch (type) {
6762                 case RTE_FLOW_ITEM_TYPE_VOID:
6763                         break;
6764                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6765                         ret = flow_dv_validate_item_port_id
6766                                         (dev, items, attr, item_flags, error);
6767                         if (ret < 0)
6768                                 return ret;
6769                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6770                         port_id_item = items;
6771                         break;
6772                 case RTE_FLOW_ITEM_TYPE_ETH:
6773                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6774                                                           true, error);
6775                         if (ret < 0)
6776                                 return ret;
6777                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6778                                              MLX5_FLOW_LAYER_OUTER_L2;
6779                         if (items->mask != NULL && items->spec != NULL) {
6780                                 ether_type =
6781                                         ((const struct rte_flow_item_eth *)
6782                                          items->spec)->type;
6783                                 ether_type &=
6784                                         ((const struct rte_flow_item_eth *)
6785                                          items->mask)->type;
6786                                 ether_type = rte_be_to_cpu_16(ether_type);
6787                         } else {
6788                                 ether_type = 0;
6789                         }
6790                         break;
6791                 case RTE_FLOW_ITEM_TYPE_VLAN:
6792                         ret = flow_dv_validate_item_vlan(items, item_flags,
6793                                                          dev, error);
6794                         if (ret < 0)
6795                                 return ret;
6796                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6797                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6798                         if (items->mask != NULL && items->spec != NULL) {
6799                                 ether_type =
6800                                         ((const struct rte_flow_item_vlan *)
6801                                          items->spec)->inner_type;
6802                                 ether_type &=
6803                                         ((const struct rte_flow_item_vlan *)
6804                                          items->mask)->inner_type;
6805                                 ether_type = rte_be_to_cpu_16(ether_type);
6806                         } else {
6807                                 ether_type = 0;
6808                         }
6809                         /* Store outer VLAN mask for of_push_vlan action. */
6810                         if (!tunnel)
6811                                 vlan_m = items->mask;
6812                         break;
6813                 case RTE_FLOW_ITEM_TYPE_IPV4:
6814                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6815                                                   &item_flags, &tunnel);
6816                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6817                                                          last_item, ether_type,
6818                                                          error);
6819                         if (ret < 0)
6820                                 return ret;
6821                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6822                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6823                         if (items->mask != NULL &&
6824                             ((const struct rte_flow_item_ipv4 *)
6825                              items->mask)->hdr.next_proto_id) {
6826                                 next_protocol =
6827                                         ((const struct rte_flow_item_ipv4 *)
6828                                          (items->spec))->hdr.next_proto_id;
6829                                 next_protocol &=
6830                                         ((const struct rte_flow_item_ipv4 *)
6831                                          (items->mask))->hdr.next_proto_id;
6832                         } else {
6833                                 /* Reset for inner layer. */
6834                                 next_protocol = 0xff;
6835                         }
6836                         break;
6837                 case RTE_FLOW_ITEM_TYPE_IPV6:
6838                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6839                                                   &item_flags, &tunnel);
6840                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6841                                                            last_item,
6842                                                            ether_type,
6843                                                            &nic_ipv6_mask,
6844                                                            error);
6845                         if (ret < 0)
6846                                 return ret;
6847                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6848                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6849                         if (items->mask != NULL &&
6850                             ((const struct rte_flow_item_ipv6 *)
6851                              items->mask)->hdr.proto) {
6852                                 item_ipv6_proto =
6853                                         ((const struct rte_flow_item_ipv6 *)
6854                                          items->spec)->hdr.proto;
6855                                 next_protocol =
6856                                         ((const struct rte_flow_item_ipv6 *)
6857                                          items->spec)->hdr.proto;
6858                                 next_protocol &=
6859                                         ((const struct rte_flow_item_ipv6 *)
6860                                          items->mask)->hdr.proto;
6861                         } else {
6862                                 /* Reset for inner layer. */
6863                                 next_protocol = 0xff;
6864                         }
6865                         break;
6866                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6867                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6868                                                                   item_flags,
6869                                                                   error);
6870                         if (ret < 0)
6871                                 return ret;
6872                         last_item = tunnel ?
6873                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6874                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6875                         if (items->mask != NULL &&
6876                             ((const struct rte_flow_item_ipv6_frag_ext *)
6877                              items->mask)->hdr.next_header) {
6878                                 next_protocol =
6879                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6880                                  items->spec)->hdr.next_header;
6881                                 next_protocol &=
6882                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6883                                  items->mask)->hdr.next_header;
6884                         } else {
6885                                 /* Reset for inner layer. */
6886                                 next_protocol = 0xff;
6887                         }
6888                         break;
6889                 case RTE_FLOW_ITEM_TYPE_TCP:
6890                         ret = mlx5_flow_validate_item_tcp
6891                                                 (items, item_flags,
6892                                                  next_protocol,
6893                                                  &nic_tcp_mask,
6894                                                  error);
6895                         if (ret < 0)
6896                                 return ret;
6897                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6898                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6899                         break;
6900                 case RTE_FLOW_ITEM_TYPE_UDP:
6901                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6902                                                           next_protocol,
6903                                                           error);
6904                         if (ret < 0)
6905                                 return ret;
6906                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6907                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6908                         break;
6909                 case RTE_FLOW_ITEM_TYPE_GRE:
6910                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6911                                                           next_protocol, error);
6912                         if (ret < 0)
6913                                 return ret;
6914                         gre_item = items;
6915                         last_item = MLX5_FLOW_LAYER_GRE;
6916                         break;
6917                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6918                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6919                                                             next_protocol,
6920                                                             error);
6921                         if (ret < 0)
6922                                 return ret;
6923                         last_item = MLX5_FLOW_LAYER_NVGRE;
6924                         break;
6925                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6926                         ret = mlx5_flow_validate_item_gre_key
6927                                 (items, item_flags, gre_item, error);
6928                         if (ret < 0)
6929                                 return ret;
6930                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6931                         break;
6932                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6933                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6934                                                             error);
6935                         if (ret < 0)
6936                                 return ret;
6937                         last_item = MLX5_FLOW_LAYER_VXLAN;
6938                         break;
6939                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6940                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6941                                                                 item_flags, dev,
6942                                                                 error);
6943                         if (ret < 0)
6944                                 return ret;
6945                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6946                         break;
6947                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6948                         ret = mlx5_flow_validate_item_geneve(items,
6949                                                              item_flags, dev,
6950                                                              error);
6951                         if (ret < 0)
6952                                 return ret;
6953                         geneve_item = items;
6954                         last_item = MLX5_FLOW_LAYER_GENEVE;
6955                         break;
6956                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6957                         ret = mlx5_flow_validate_item_geneve_opt(items,
6958                                                                  last_item,
6959                                                                  geneve_item,
6960                                                                  dev,
6961                                                                  error);
6962                         if (ret < 0)
6963                                 return ret;
6964                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6965                         break;
6966                 case RTE_FLOW_ITEM_TYPE_MPLS:
6967                         ret = mlx5_flow_validate_item_mpls(dev, items,
6968                                                            item_flags,
6969                                                            last_item, error);
6970                         if (ret < 0)
6971                                 return ret;
6972                         last_item = MLX5_FLOW_LAYER_MPLS;
6973                         break;
6975                 case RTE_FLOW_ITEM_TYPE_MARK:
6976                         ret = flow_dv_validate_item_mark(dev, items, attr,
6977                                                          error);
6978                         if (ret < 0)
6979                                 return ret;
6980                         last_item = MLX5_FLOW_ITEM_MARK;
6981                         break;
6982                 case RTE_FLOW_ITEM_TYPE_META:
6983                         ret = flow_dv_validate_item_meta(dev, items, attr,
6984                                                          error);
6985                         if (ret < 0)
6986                                 return ret;
6987                         last_item = MLX5_FLOW_ITEM_METADATA;
6988                         break;
6989                 case RTE_FLOW_ITEM_TYPE_ICMP:
6990                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
6991                                                            next_protocol,
6992                                                            error);
6993                         if (ret < 0)
6994                                 return ret;
6995                         last_item = MLX5_FLOW_LAYER_ICMP;
6996                         break;
6997                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6998                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6999                                                             next_protocol,
7000                                                             error);
7001                         if (ret < 0)
7002                                 return ret;
7003                         item_ipv6_proto = IPPROTO_ICMPV6;
7004                         last_item = MLX5_FLOW_LAYER_ICMP6;
7005                         break;
7006                 case RTE_FLOW_ITEM_TYPE_TAG:
7007                         ret = flow_dv_validate_item_tag(dev, items,
7008                                                         attr, error);
7009                         if (ret < 0)
7010                                 return ret;
7011                         last_item = MLX5_FLOW_ITEM_TAG;
7012                         break;
7013                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7014                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7015                         break;
7016                 case RTE_FLOW_ITEM_TYPE_GTP:
7017                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7018                                                         error);
7019                         if (ret < 0)
7020                                 return ret;
7021                         gtp_item = items;
7022                         last_item = MLX5_FLOW_LAYER_GTP;
7023                         break;
7024                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7025                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7026                                                             gtp_item, attr,
7027                                                             error);
7028                         if (ret < 0)
7029                                 return ret;
7030                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7031                         break;
7032                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7033                         /* Capacity will be checked in the translate stage. */
7034                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7035                                                             last_item,
7036                                                             ether_type,
7037                                                             &nic_ecpri_mask,
7038                                                             error);
7039                         if (ret < 0)
7040                                 return ret;
7041                         last_item = MLX5_FLOW_LAYER_ECPRI;
7042                         break;
7043                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7044                         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
7045                                 return rte_flow_error_set
7046                                         (error, ENOTSUP,
7047                                          RTE_FLOW_ERROR_TYPE_ITEM,
7048                                          NULL, "multiple integrity items not supported");
7049                         ret = flow_dv_validate_item_integrity(dev, rule_items,
7050                                                               items, error);
7051                         if (ret < 0)
7052                                 return ret;
7053                         last_item = MLX5_FLOW_ITEM_INTEGRITY;
7054                         break;
7055                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7056                         ret = flow_dv_validate_item_aso_ct(dev, items,
7057                                                            &item_flags, error);
7058                         if (ret < 0)
7059                                 return ret;
7060                         break;
7061                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7062                         /* The tunnel offload item was processed before;
7063                          * list it here as a supported type.
7064                          */
7065                         break;
7066                 default:
7067                         return rte_flow_error_set(error, ENOTSUP,
7068                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7069                                                   NULL, "item not supported");
7070                 }
7071                 item_flags |= last_item;
7072         }
7073         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7074                 int type = actions->type;
7075                 bool shared_count = false;
7076
7077                 if (!mlx5_flow_os_action_supported(type))
7078                         return rte_flow_error_set(error, ENOTSUP,
7079                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7080                                                   actions,
7081                                                   "action not supported");
7082                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7083                         return rte_flow_error_set(error, ENOTSUP,
7084                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7085                                                   actions, "too many actions");
7086                 if (action_flags &
7087                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7088                         return rte_flow_error_set(error, ENOTSUP,
7089                                 RTE_FLOW_ERROR_TYPE_ACTION,
7090                                 NULL, "meter action with policy "
7091                                 "must be the last action");
7092                 switch (type) {
7093                 case RTE_FLOW_ACTION_TYPE_VOID:
7094                         break;
7095                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7096                         ret = flow_dv_validate_action_port_id(dev,
7097                                                               action_flags,
7098                                                               actions,
7099                                                               attr,
7100                                                               error);
7101                         if (ret)
7102                                 return ret;
7103                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7104                         ++actions_n;
7105                         break;
7106                 case RTE_FLOW_ACTION_TYPE_FLAG:
7107                         ret = flow_dv_validate_action_flag(dev, action_flags,
7108                                                            attr, error);
7109                         if (ret < 0)
7110                                 return ret;
7111                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7112                                 /* Count all modify-header actions as one. */
7113                                 if (!(action_flags &
7114                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7115                                         ++actions_n;
7116                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7117                                                 MLX5_FLOW_ACTION_MARK_EXT;
7118                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7119                                         modify_after_mirror = 1;
7121                         } else {
7122                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7123                                 ++actions_n;
7124                         }
7125                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7126                         break;
7127                 case RTE_FLOW_ACTION_TYPE_MARK:
7128                         ret = flow_dv_validate_action_mark(dev, actions,
7129                                                            action_flags,
7130                                                            attr, error);
7131                         if (ret < 0)
7132                                 return ret;
7133                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7134                                 /* Count all modify-header actions as one. */
7135                                 if (!(action_flags &
7136                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7137                                         ++actions_n;
7138                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7139                                                 MLX5_FLOW_ACTION_MARK_EXT;
7140                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7141                                         modify_after_mirror = 1;
7142                         } else {
7143                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7144                                 ++actions_n;
7145                         }
7146                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7147                         break;
7148                 case RTE_FLOW_ACTION_TYPE_SET_META:
7149                         ret = flow_dv_validate_action_set_meta(dev, actions,
7150                                                                action_flags,
7151                                                                attr, error);
7152                         if (ret < 0)
7153                                 return ret;
7154                         /* Count all modify-header actions as one action. */
7155                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7156                                 ++actions_n;
7157                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7158                                 modify_after_mirror = 1;
7159                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7160                         rw_act_num += MLX5_ACT_NUM_SET_META;
7161                         break;
7162                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7163                         ret = flow_dv_validate_action_set_tag(dev, actions,
7164                                                               action_flags,
7165                                                               attr, error);
7166                         if (ret < 0)
7167                                 return ret;
7168                         /* Count all modify-header actions as one action. */
7169                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7170                                 ++actions_n;
7171                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7172                                 modify_after_mirror = 1;
7173                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7174                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7175                         break;
7176                 case RTE_FLOW_ACTION_TYPE_DROP:
7177                         ret = mlx5_flow_validate_action_drop(action_flags,
7178                                                              attr, error);
7179                         if (ret < 0)
7180                                 return ret;
7181                         action_flags |= MLX5_FLOW_ACTION_DROP;
7182                         ++actions_n;
7183                         break;
7184                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7185                         ret = mlx5_flow_validate_action_queue(actions,
7186                                                               action_flags, dev,
7187                                                               attr, error);
7188                         if (ret < 0)
7189                                 return ret;
7190                         queue_index = ((const struct rte_flow_action_queue *)
7191                                                         (actions->conf))->index;
7192                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7193                         ++actions_n;
7194                         break;
7195                 case RTE_FLOW_ACTION_TYPE_RSS:
7196                         rss = actions->conf;
7197                         ret = mlx5_flow_validate_action_rss(actions,
7198                                                             action_flags, dev,
7199                                                             attr, item_flags,
7200                                                             error);
7201                         if (ret < 0)
7202                                 return ret;
7203                         if (rss && sample_rss &&
7204                             (sample_rss->level != rss->level ||
7205                             sample_rss->types != rss->types))
7206                                 return rte_flow_error_set(error, ENOTSUP,
7207                                         RTE_FLOW_ERROR_TYPE_ACTION,
7208                                         NULL,
7209                                         "Can't use different RSS types "
7210                                         "or levels in the same flow");
7211                         if (rss != NULL && rss->queue_num)
7212                                 queue_index = rss->queue[0];
7213                         action_flags |= MLX5_FLOW_ACTION_RSS;
7214                         ++actions_n;
7215                         break;
7216                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7217                         ret = mlx5_flow_validate_action_default_miss
7218                                                 (action_flags, attr,
7219                                                  error);
7220                         if (ret < 0)
7221                                 return ret;
7222                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7223                         ++actions_n;
7224                         break;
7225                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7226                 case RTE_FLOW_ACTION_TYPE_COUNT:
7227                         shared_count = is_shared_action_count(actions);
7228                         ret = flow_dv_validate_action_count(dev, shared_count,
7229                                                             action_flags,
7230                                                             error);
7231                         if (ret < 0)
7232                                 return ret;
7233                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7234                         ++actions_n;
7235                         break;
7236                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7237                         if (flow_dv_validate_action_pop_vlan(dev,
7238                                                              action_flags,
7239                                                              actions,
7240                                                              item_flags, attr,
7241                                                              error))
7242                                 return -rte_errno;
7243                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7244                                 modify_after_mirror = 1;
7245                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7246                         ++actions_n;
7247                         break;
7248                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7249                         ret = flow_dv_validate_action_push_vlan(dev,
7250                                                                 action_flags,
7251                                                                 vlan_m,
7252                                                                 actions, attr,
7253                                                                 error);
7254                         if (ret < 0)
7255                                 return ret;
7256                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7257                                 modify_after_mirror = 1;
7258                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7259                         ++actions_n;
7260                         break;
7261                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7262                         ret = flow_dv_validate_action_set_vlan_pcp
7263                                                 (action_flags, actions, error);
7264                         if (ret < 0)
7265                                 return ret;
7266                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7267                                 modify_after_mirror = 1;
7268                         /* Count PCP with push_vlan command. */
7269                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7270                         break;
7271                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7272                         ret = flow_dv_validate_action_set_vlan_vid
7273                                                 (item_flags, action_flags,
7274                                                  actions, error);
7275                         if (ret < 0)
7276                                 return ret;
7277                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7278                                 modify_after_mirror = 1;
7279                         /* Count VID with push_vlan command. */
7280                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7281                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7282                         break;
7283                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7284                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7285                         ret = flow_dv_validate_action_l2_encap(dev,
7286                                                                action_flags,
7287                                                                actions, attr,
7288                                                                error);
7289                         if (ret < 0)
7290                                 return ret;
7291                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7292                         ++actions_n;
7293                         break;
7294                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7295                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7296                         ret = flow_dv_validate_action_decap(dev, action_flags,
7297                                                             actions, item_flags,
7298                                                             attr, error);
7299                         if (ret < 0)
7300                                 return ret;
7301                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7302                                 modify_after_mirror = 1;
7303                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7304                         ++actions_n;
7305                         break;
7306                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7307                         ret = flow_dv_validate_action_raw_encap_decap
7308                                 (dev, NULL, actions->conf, attr, &action_flags,
7309                                  &actions_n, actions, item_flags, error);
7310                         if (ret < 0)
7311                                 return ret;
7312                         break;
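                     /*
                      * Note: a RAW_DECAP immediately followed by a RAW_ENCAP
                      * (any VOID actions in between are skipped) is validated
                      * as a single decap-then-encap pair, hence the
                      * look-ahead below.
                      */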
7313                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7314                         decap = actions->conf;
7315                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7316                                 ;
7317                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7318                                 encap = NULL;
7319                                 actions--;
7320                         } else {
7321                                 encap = actions->conf;
7322                         }
7323                         ret = flow_dv_validate_action_raw_encap_decap
7324                                            (dev,
7325                                             decap ? decap : &empty_decap, encap,
7326                                             attr, &action_flags, &actions_n,
7327                                             actions, item_flags, error);
7328                         if (ret < 0)
7329                                 return ret;
7330                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7331                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7332                                 modify_after_mirror = 1;
7333                         break;
7334                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7335                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7336                         ret = flow_dv_validate_action_modify_mac(action_flags,
7337                                                                  actions,
7338                                                                  item_flags,
7339                                                                  error);
7340                         if (ret < 0)
7341                                 return ret;
7342                         /* Count all modify-header actions as one action. */
7343                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7344                                 ++actions_n;
7345                         action_flags |= actions->type ==
7346                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7347                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7348                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7349                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7350                                 modify_after_mirror = 1;
7351                         /*
7352                          * Even if the source and destination MAC addresses
7353                          * overlap in the header with 4B alignment, the convert
7354                          * function handles them separately, creating 4 SW
7355                          * actions; 2 actions are added each time, regardless
7356                          * of how many address bytes are set.
7357                          */
7358                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7359                         break;
7360                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7361                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7362                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7363                                                                   actions,
7364                                                                   item_flags,
7365                                                                   error);
7366                         if (ret < 0)
7367                                 return ret;
7368                         /* Count all modify-header actions as one action. */
7369                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7370                                 ++actions_n;
7371                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7372                                 modify_after_mirror = 1;
7373                         action_flags |= actions->type ==
7374                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7375                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7376                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7377                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7378                         break;
7379                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7380                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7381                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7382                                                                   actions,
7383                                                                   item_flags,
7384                                                                   error);
7385                         if (ret < 0)
7386                                 return ret;
7387                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7388                                 return rte_flow_error_set(error, ENOTSUP,
7389                                         RTE_FLOW_ERROR_TYPE_ACTION,
7390                                         actions,
7391                                         "Can't change header "
7392                                         "with ICMPv6 proto");
7393                         /* Count all modify-header actions as one action. */
7394                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7395                                 ++actions_n;
7396                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7397                                 modify_after_mirror = 1;
7398                         action_flags |= actions->type ==
7399                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7400                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7401                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7402                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7403                         break;
7404                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7405                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7406                         ret = flow_dv_validate_action_modify_tp(action_flags,
7407                                                                 actions,
7408                                                                 item_flags,
7409                                                                 error);
7410                         if (ret < 0)
7411                                 return ret;
7412                         /* Count all modify-header actions as one action. */
7413                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7414                                 ++actions_n;
7415                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7416                                 modify_after_mirror = 1;
7417                         action_flags |= actions->type ==
7418                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7419                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7420                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7421                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7422                         break;
7423                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7424                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7425                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7426                                                                  actions,
7427                                                                  item_flags,
7428                                                                  error);
7429                         if (ret < 0)
7430                                 return ret;
7431                         /* Count all modify-header actions as one action. */
7432                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7433                                 ++actions_n;
7434                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7435                                 modify_after_mirror = 1;
7436                         action_flags |= actions->type ==
7437                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7438                                                 MLX5_FLOW_ACTION_SET_TTL :
7439                                                 MLX5_FLOW_ACTION_DEC_TTL;
7440                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7441                         break;
7442                 case RTE_FLOW_ACTION_TYPE_JUMP:
7443                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7444                                                            action_flags,
7445                                                            attr, external,
7446                                                            error);
7447                         if (ret)
7448                                 return ret;
7449                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7450                             fdb_mirror_limit)
7451                                 return rte_flow_error_set(error, EINVAL,
7452                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7453                                                   NULL,
7454                                                   "sample and jump action combination is not supported");
7455                         ++actions_n;
7456                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7457                         break;
7458                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7459                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7460                         ret = flow_dv_validate_action_modify_tcp_seq
7461                                                                 (action_flags,
7462                                                                  actions,
7463                                                                  item_flags,
7464                                                                  error);
7465                         if (ret < 0)
7466                                 return ret;
7467                         /* Count all modify-header actions as one action. */
7468                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7469                                 ++actions_n;
7470                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7471                                 modify_after_mirror = 1;
7472                         action_flags |= actions->type ==
7473                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7474                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7475                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7476                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7477                         break;
7478                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7479                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7480                         ret = flow_dv_validate_action_modify_tcp_ack
7481                                                                 (action_flags,
7482                                                                  actions,
7483                                                                  item_flags,
7484                                                                  error);
7485                         if (ret < 0)
7486                                 return ret;
7487                         /* Count all modify-header actions as one action. */
7488                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7489                                 ++actions_n;
7490                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7491                                 modify_after_mirror = 1;
7492                         action_flags |= actions->type ==
7493                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7494                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7495                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7496                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7497                         break;
7498                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7499                         break;
7500                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7501                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7502                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7503                         break;
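                     /*
                      * Meter validation also covers the meter policy: when
                      * the meter was created with a non-default policy, the
                      * policy fate actions terminate the flow. This is
                      * recorded in
                      * MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY and
                      * enforced after the action loop.
                      */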
7504                 case RTE_FLOW_ACTION_TYPE_METER:
7505                         ret = mlx5_flow_validate_action_meter(dev,
7506                                                               action_flags,
7507                                                               actions, attr,
7508                                                               port_id_item,
7509                                                               &def_policy,
7510                                                               error);
7511                         if (ret < 0)
7512                                 return ret;
7513                         action_flags |= MLX5_FLOW_ACTION_METER;
7514                         if (!def_policy)
7515                                 action_flags |=
7516                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7517                         ++actions_n;
7518                         /* Meter action will add one more TAG action. */
7519                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7520                         break;
7521                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7522                         if (!attr->transfer && !attr->group)
7523                                 return rte_flow_error_set(error, ENOTSUP,
7524                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7525                                                 NULL, "Shared ASO age action is "
7526                                                 "not supported for group 0");
7527                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7528                                 return rte_flow_error_set
7529                                                   (error, EINVAL,
7530                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7531                                                    NULL,
7532                                                    "duplicate age actions set");
7533                         action_flags |= MLX5_FLOW_ACTION_AGE;
7534                         ++actions_n;
7535                         break;
7536                 case RTE_FLOW_ACTION_TYPE_AGE:
7537                         ret = flow_dv_validate_action_age(action_flags,
7538                                                           actions, dev,
7539                                                           error);
7540                         if (ret < 0)
7541                                 return ret;
7542                         /*
7543                          * Validate that the regular (counter-based) AGE action
7544                          * is mutually exclusive with shared counter actions.
7545                          */
7546                         if (!priv->sh->flow_hit_aso_en) {
7547                                 if (shared_count)
7548                                         return rte_flow_error_set
7549                                                 (error, EINVAL,
7550                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7551                                                 NULL,
7552                                                 "old age and shared count combination is not supported");
7553                                 if (sample_count)
7554                                         return rte_flow_error_set
7555                                                 (error, EINVAL,
7556                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7557                                                 NULL,
7558                                                 "old age action and count must be in the same sub flow");
7559                         }
7560                         action_flags |= MLX5_FLOW_ACTION_AGE;
7561                         ++actions_n;
7562                         break;
7563                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7564                         ret = flow_dv_validate_action_modify_ipv4_dscp
7565                                                          (action_flags,
7566                                                           actions,
7567                                                           item_flags,
7568                                                           error);
7569                         if (ret < 0)
7570                                 return ret;
7571                         /* Count all modify-header actions as one action. */
7572                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7573                                 ++actions_n;
7574                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7575                                 modify_after_mirror = 1;
7576                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7577                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7578                         break;
7579                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7580                         ret = flow_dv_validate_action_modify_ipv6_dscp
7581                                                                 (action_flags,
7582                                                                  actions,
7583                                                                  item_flags,
7584                                                                  error);
7585                         if (ret < 0)
7586                                 return ret;
7587                         /* Count all modify-header actions as one action. */
7588                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7589                                 ++actions_n;
7590                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7591                                 modify_after_mirror = 1;
7592                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7593                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7594                         break;
7595                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7596                         ret = flow_dv_validate_action_sample(&action_flags,
7597                                                              actions, dev,
7598                                                              attr, item_flags,
7599                                                              rss, &sample_rss,
7600                                                              &sample_count,
7601                                                              &fdb_mirror_limit,
7602                                                              error);
7603                         if (ret < 0)
7604                                 return ret;
7605                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7606                         ++actions_n;
7607                         break;
7608                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7609                         ret = flow_dv_validate_action_modify_field(dev,
7610                                                                    action_flags,
7611                                                                    actions,
7612                                                                    attr,
7613                                                                    error);
7614                         if (ret < 0)
7615                                 return ret;
7616                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7617                                 modify_after_mirror = 1;
7618                         /* Count all modify-header actions as one action. */
7619                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7620                                 ++actions_n;
7621                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7622                         rw_act_num += ret;
7623                         break;
7624                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7625                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7626                                                              item_flags, attr,
7627                                                              error);
7628                         if (ret < 0)
7629                                 return ret;
7630                         action_flags |= MLX5_FLOW_ACTION_CT;
7631                         break;
7632                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7633                         /* The tunnel offload action was processed before;
7634                          * it is listed here as a supported type.
7635                          */
7636                         break;
7637                 default:
7638                         return rte_flow_error_set(error, ENOTSUP,
7639                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7640                                                   actions,
7641                                                   "action not supported");
7642                 }
7643         }
7644         /*
7645          * Validate actions in flow rules:
7646          * - Explicit decap action is prohibited by the tunnel offload API.
7647          * - Drop action in tunnel steer rule is prohibited by the API.
7648          * - Application cannot use MARK action because its value can mask
7649          *   tunnel default miss notification.
7650          * - JUMP in tunnel match rule has no support in current PMD
7651          *   implementation.
7652          * - TAG & META are reserved for future use.
7653          */
7654         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7655                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7656                                             MLX5_FLOW_ACTION_MARK     |
7657                                             MLX5_FLOW_ACTION_SET_TAG  |
7658                                             MLX5_FLOW_ACTION_SET_META |
7659                                             MLX5_FLOW_ACTION_DROP;
7660
7661                 if (action_flags & bad_actions_mask)
7662                         return rte_flow_error_set
7663                                         (error, EINVAL,
7664                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7665                                         "Invalid RTE action in tunnel "
7666                                         "set decap rule");
7667                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7668                         return rte_flow_error_set
7669                                         (error, EINVAL,
7670                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7671                                         "tunnel set decap rule must terminate "
7672                                         "with JUMP");
7673                 if (!attr->ingress)
7674                         return rte_flow_error_set
7675                                         (error, EINVAL,
7676                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7677                                         "tunnel flows for ingress traffic only");
7678         }
7679         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7680                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7681                                             MLX5_FLOW_ACTION_MARK    |
7682                                             MLX5_FLOW_ACTION_SET_TAG |
7683                                             MLX5_FLOW_ACTION_SET_META;
7684
7685                 if (action_flags & bad_actions_mask)
7686                         return rte_flow_error_set
7687                                         (error, EINVAL,
7688                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7689                                         "Invalid RTE action in tunnel "
7690                                         "set match rule");
7691         }
7692         /*
7693          * Validate the drop action mutual exclusion with other actions.
7694          * Drop action is mutually-exclusive with any other action, except for
7695          * Count action.
7696          * Drop action compatibility with tunnel offload was already validated.
7697          */
7698         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7699                             MLX5_FLOW_ACTION_TUNNEL_SET));
7700         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7701             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7702                 return rte_flow_error_set(error, EINVAL,
7703                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7704                                           "Drop action is mutually-exclusive "
7705                                           "with any other action, except for "
7706                                           "Count action");
7707         /* E-Switch has a few restrictions on using items and actions. */
7708         if (attr->transfer) {
7709                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7710                     action_flags & MLX5_FLOW_ACTION_FLAG)
7711                         return rte_flow_error_set(error, ENOTSUP,
7712                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7713                                                   NULL,
7714                                                   "unsupported action FLAG");
7715                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7716                     action_flags & MLX5_FLOW_ACTION_MARK)
7717                         return rte_flow_error_set(error, ENOTSUP,
7718                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7719                                                   NULL,
7720                                                   "unsupported action MARK");
7721                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7722                         return rte_flow_error_set(error, ENOTSUP,
7723                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7724                                                   NULL,
7725                                                   "unsupported action QUEUE");
7726                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7727                         return rte_flow_error_set(error, ENOTSUP,
7728                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7729                                                   NULL,
7730                                                   "unsupported action RSS");
7731                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7732                         return rte_flow_error_set(error, EINVAL,
7733                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7734                                                   actions,
7735                                                   "no fate action is found");
7736         } else {
7737                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7738                         return rte_flow_error_set(error, EINVAL,
7739                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7740                                                   actions,
7741                                                   "no fate action is found");
7742         }
7743         /*
7744          * Continue validation for Xcap and VLAN actions.
7745          * If hairpin works in explicit TX rule mode, there is no action
7746          * splitting and the validation of a hairpin ingress flow should be
7747          * the same as for other standard flows.
7748          */
7749         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7750                              MLX5_FLOW_VLAN_ACTIONS)) &&
7751             (queue_index == 0xFFFF ||
7752              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7753              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7754              conf->tx_explicit != 0))) {
7755                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7756                     MLX5_FLOW_XCAP_ACTIONS)
7757                         return rte_flow_error_set(error, ENOTSUP,
7758                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7759                                                   NULL, "encap and decap "
7760                                                   "combination isn't supported");
7761                 if (!attr->transfer && attr->ingress) {
7762                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7763                                 return rte_flow_error_set
7764                                                 (error, ENOTSUP,
7765                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7766                                                  NULL, "encap is not supported"
7767                                                  " for ingress traffic");
7768                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7769                                 return rte_flow_error_set
7770                                                 (error, ENOTSUP,
7771                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7772                                                  NULL, "push VLAN action not "
7773                                                  "supported for ingress");
7774                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7775                                         MLX5_FLOW_VLAN_ACTIONS)
7776                                 return rte_flow_error_set
7777                                                 (error, ENOTSUP,
7778                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7779                                                  NULL, "no support for "
7780                                                  "multiple VLAN actions");
7781                 }
7782         }
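             /*
              * For a meter created with a non-default (termination) policy,
              * the fate action lives in the policy itself, so an additional
              * fate action in the rule is rejected, and on egress no
              * packet-modifying action may be placed before the meter.
              */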
7783         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7784                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7785                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7786                         attr->ingress)
7787                         return rte_flow_error_set
7788                                 (error, ENOTSUP,
7789                                 RTE_FLOW_ERROR_TYPE_ACTION,
7790                                 NULL, "fate action not supported for "
7791                                 "meter with policy");
7792                 if (attr->egress) {
7793                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7794                                 return rte_flow_error_set
7795                                         (error, ENOTSUP,
7796                                         RTE_FLOW_ERROR_TYPE_ACTION,
7797                                         NULL, "modify header action in egress "
7798                                         "cannot be done before meter action");
7799                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7800                                 return rte_flow_error_set
7801                                         (error, ENOTSUP,
7802                                         RTE_FLOW_ERROR_TYPE_ACTION,
7803                                         NULL, "encap action in egress "
7804                                         "cannot be done before meter action");
7805                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7806                                 return rte_flow_error_set
7807                                         (error, ENOTSUP,
7808                                         RTE_FLOW_ERROR_TYPE_ACTION,
7809                                         NULL, "push vlan action in egress "
7810                                         "cannot be done before meter action");
7811                 }
7812         }
7813         /*
7814          * Hairpin flow will add one more TAG action in TX implicit mode.
7815          * In TX explicit mode, there will be no hairpin flow ID.
7816          */
7817         if (hairpin > 0)
7818                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7819         /* Extra metadata enabled: one more TAG action will be added. */
7820         if (dev_conf->dv_flow_en &&
7821             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7822             mlx5_flow_ext_mreg_supported(dev))
7823                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7824         if (rw_act_num >
7825             flow_dv_modify_hdr_action_max(dev, is_root)) {
7826                 return rte_flow_error_set(error, ENOTSUP,
7827                                           RTE_FLOW_ERROR_TYPE_ACTION,
7828                                           NULL, "too many header modify"
7829                                           " actions to support");
7830         }
7831         /* E-Switch egress mirror and modify flows have a limitation on CX5. */
7832         if (fdb_mirror_limit && modify_after_mirror)
7833                 return rte_flow_error_set(error, EINVAL,
7834                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7835                                 "sample before modify action is not supported");
7836         return 0;
7837 }
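
     /*
      * Illustrative sketch, not part of the driver: how an application
      * request reaches the validation above through the generic rte_flow
      * API. The function name and the attribute/pattern values here are
      * hypothetical; only the rte_flow types and rte_flow_validate() are
      * real API. A meter created with a non-default policy makes the flow
      * terminate in the policy fate action, which the code above tracks via
      * MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY.
      */
     static __rte_unused int
     mlx5_example_validate_meter_flow(uint16_t port_id, uint32_t mtr_id)
     {
             struct rte_flow_error error;
             const struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
             const struct rte_flow_item pattern[] = {
                     { .type = RTE_FLOW_ITEM_TYPE_ETH },
                     { .type = RTE_FLOW_ITEM_TYPE_END },
             };
             const struct rte_flow_action_meter meter_conf = {
                     .mtr_id = mtr_id,
             };
             const struct rte_flow_action actions[] = {
                     { .type = RTE_FLOW_ACTION_TYPE_METER,
                       .conf = &meter_conf },
                     { .type = RTE_FLOW_ACTION_TYPE_END },
             };

             /* For mlx5 DV flows this dispatches to flow_dv_validate(). */
             return rte_flow_validate(port_id, &attr, pattern, actions,
                                      &error);
     }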
7838
7839 /**
7840  * Internal preparation function. Allocates the DV flow size;
7841  * this size is constant.
7842  *
7843  * @param[in] dev
7844  *   Pointer to the rte_eth_dev structure.
7845  * @param[in] attr
7846  *   Pointer to the flow attributes.
7847  * @param[in] items
7848  *   Pointer to the list of items.
7849  * @param[in] actions
7850  *   Pointer to the list of actions.
7851  * @param[out] error
7852  *   Pointer to the error structure.
7853  *
7854  * @return
7855  *   Pointer to mlx5_flow object on success,
7856  *   otherwise NULL and rte_errno is set.
7857  */
7858 static struct mlx5_flow *
7859 flow_dv_prepare(struct rte_eth_dev *dev,
7860                 const struct rte_flow_attr *attr,
7861                 const struct rte_flow_item items[] __rte_unused,
7862                 const struct rte_flow_action actions[] __rte_unused,
7863                 struct rte_flow_error *error)
7864 {
7865         uint32_t handle_idx = 0;
7866         struct mlx5_flow *dev_flow;
7867         struct mlx5_flow_handle *dev_handle;
7868         struct mlx5_priv *priv = dev->data->dev_private;
7869         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7870
7871         MLX5_ASSERT(wks);
7872         wks->skip_matcher_reg = 0;
7873         wks->policy = NULL;
7874         wks->final_policy = NULL;
7875         /* Guard against corrupting the flow workspace memory. */
7876         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7877                 rte_flow_error_set(error, ENOSPC,
7878                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7879                                    "no free temporary device flow");
7880                 return NULL;
7881         }
7882         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7883                                    &handle_idx);
7884         if (!dev_handle) {
7885                 rte_flow_error_set(error, ENOMEM,
7886                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7887                                    "not enough memory to create flow handle");
7888                 return NULL;
7889         }
7890         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7891         dev_flow = &wks->flows[wks->flow_idx++];
7892         memset(dev_flow, 0, sizeof(*dev_flow));
7893         dev_flow->handle = dev_handle;
7894         dev_flow->handle_idx = handle_idx;
7895         /*
7896          * Some old rdma-core releases check the length of the matching
7897          * parameter before continuing, and that check must use the length
7898          * without the misc4 param. If the flow needs misc4 matching, the
7899          * length is adjusted accordingly later. Each param member is
7900          * naturally aligned on a 64B boundary.
7901          */
7902         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7903                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7904         dev_flow->ingress = attr->ingress;
7905         dev_flow->dv.transfer = attr->transfer;
7906         return dev_flow;
7907 }
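
     /*
      * Illustrative note on the size math above, assuming the PRM layout of
      * this generation: fte_match_param is composed of 64B sections
      * (outer_headers, misc, inner_headers, misc2, misc3, misc4), so the
      * initial value size excludes only the trailing 64B misc4 section and
      * is restored when misc4 matching is actually used.
      */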
7908
7909 #ifdef RTE_LIBRTE_MLX5_DEBUG
7910 /**
7911  * Sanity check for match mask and value. Similar to check_valid_spec() in
7912  * the kernel driver. If an unmasked bit is present in the value, it fails.
7913  *
7914  * @param match_mask
7915  *   pointer to match mask buffer.
7916  * @param match_value
7917  *   pointer to match value buffer.
7918  *
7919  * @return
7920  *   0 if valid, -EINVAL otherwise.
7921  */
7922 static int
7923 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7924 {
7925         uint8_t *m = match_mask;
7926         uint8_t *v = match_value;
7927         unsigned int i;
7928
7929         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7930                 if (v[i] & ~m[i]) {
7931                         DRV_LOG(ERR,
7932                                 "match_value differs from match_criteria"
7933                                 " %p[%u] != %p[%u]",
7934                                 match_value, i, match_mask, i);
7935                         return -EINVAL;
7936                 }
7937         }
7938         return 0;
7939 }
7940 #endif
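
     /*
      * Illustrative note for the debug check above: the invariant is that
      * the value must be a subset of the mask, i.e.
      * (value[i] & ~mask[i]) == 0 for every byte. For example, mask 0xf0
      * with value 0x30 is valid, while mask 0xf0 with value 0x38 fails
      * because bit 0x08 is set in the value but not covered by the mask.
      */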
7941
7942 /**
7943  * Add match of ip_version.
7944  *
7945  * @param[in] group
7946  *   Flow group.
7947  * @param[in] headers_v
7948  *   Values header pointer.
7949  * @param[in] headers_m
7950  *   Masks header pointer.
7951  * @param[in] ip_version
7952  *   The IP version to set.
7953  */
7954 static inline void
7955 flow_dv_set_match_ip_version(uint32_t group,
7956                              void *headers_v,
7957                              void *headers_m,
7958                              uint8_t ip_version)
7959 {
7960         if (group == 0)
7961                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7962         else
7963                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7964                          ip_version);
7965         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7966         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7967         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7968 }
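
     /*
      * Illustrative note: in group 0 (the root table) the ip_version mask
      * is widened to 0xf, reportedly because the root-table path does not
      * accept a partial mask on this field, while non-root groups can use
      * the exact version value as the mask. In both cases the ethertype
      * match is cleared, since ip_version supersedes it.
      */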
7969
7970 /**
7971  * Add Ethernet item to matcher and to the value.
7972  *
7973  * @param[in, out] matcher
7974  *   Flow matcher.
7975  * @param[in, out] key
7976  *   Flow matcher value.
7977  * @param[in] item
7978  *   Flow pattern to translate.
7979  * @param[in] inner
7980  *   Item is inner pattern.
7981  * @param[in] group
      *   The group to insert the rule.
      */
7982 static void
7983 flow_dv_translate_item_eth(void *matcher, void *key,
7984                            const struct rte_flow_item *item, int inner,
7985                            uint32_t group)
7986 {
7987         const struct rte_flow_item_eth *eth_m = item->mask;
7988         const struct rte_flow_item_eth *eth_v = item->spec;
7989         const struct rte_flow_item_eth nic_mask = {
7990                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7991                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7992                 .type = RTE_BE16(0xffff),
7993                 .has_vlan = 0,
7994         };
7995         void *hdrs_m;
7996         void *hdrs_v;
7997         char *l24_v;
7998         unsigned int i;
7999
8000         if (!eth_v)
8001                 return;
8002         if (!eth_m)
8003                 eth_m = &nic_mask;
8004         if (inner) {
8005                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8006                                          inner_headers);
8007                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8008         } else {
8009                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8010                                          outer_headers);
8011                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8012         }
8013         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8014                &eth_m->dst, sizeof(eth_m->dst));
8015         /* The value must be in the range of the mask. */
8016         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8017         for (i = 0; i < sizeof(eth_m->dst); ++i)
8018                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8019         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8020                &eth_m->src, sizeof(eth_m->src));
8021         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8022         /* The value must be in the range of the mask. */
8023         for (i = 0; i < sizeof(eth_m->src); ++i)
8024                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8025         /*
8026          * HW supports match on one Ethertype, the Ethertype following the last
8027          * VLAN tag of the packet (see PRM).
8028          * Set match on ethertype only if ETH header is not followed by VLAN.
8029          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8030          * ethertype, and use ip_version field instead.
8031          * eCPRI over Ether layer will use type value 0xAEFE.
8032          */
8033         if (eth_m->type == 0xFFFF) {
8034                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8035                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8036                 switch (eth_v->type) {
8037                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8038                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8039                         return;
8040                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8041                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8042                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8043                         return;
8044                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8045                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8046                         return;
8047                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8048                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8049                         return;
8050                 default:
8051                         break;
8052                 }
8053         }
8054         if (eth_m->has_vlan) {
8055                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8056                 if (eth_v->has_vlan) {
8057                         /*
8058                          * Here, when the has_more_vlan field in the VLAN item
8059                          * is also not set, only single-tagged packets match.
8060                          */
8061                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8062                         return;
8063                 }
8064         }
8065         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8066                  rte_be_to_cpu_16(eth_m->type));
8067         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8068         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8069 }
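
     /*
      * Illustrative sketch, not part of the driver: a hypothetical ETH item
      * with a fully-masked IPv4 Ethertype. Per the translation above, such
      * an item is matched via the ip_version field rather than an ethertype
      * match.
      */
     static __rte_unused void
     mlx5_example_eth_item_ipv4(void)
     {
             static const struct rte_flow_item_eth spec = {
                     .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
             };
             static const struct rte_flow_item_eth mask = {
                     .type = RTE_BE16(0xffff),
             };
             static const struct rte_flow_item item = {
                     .type = RTE_FLOW_ITEM_TYPE_ETH,
                     .spec = &spec,
                     .mask = &mask,
             };

             /* The item would be fed to flow_dv_translate_item_eth(). */
             (void)item;
     }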
8070
8071 /**
8072  * Add VLAN item to matcher and to the value.
8073  *
8074  * @param[in, out] dev_flow
8075  *   Flow descriptor.
8076  * @param[in, out] matcher
8077  *   Flow matcher.
8078  * @param[in, out] key
8079  *   Flow matcher value.
8080  * @param[in] item
8081  *   Flow pattern to translate.
8082  * @param[in] inner
8083  *   Item is inner pattern.
8084  * @param[in] group
      *   The group to insert the rule.
      */
8085 static void
8086 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8087                             void *matcher, void *key,
8088                             const struct rte_flow_item *item,
8089                             int inner, uint32_t group)
8090 {
8091         const struct rte_flow_item_vlan *vlan_m = item->mask;
8092         const struct rte_flow_item_vlan *vlan_v = item->spec;
8093         void *hdrs_m;
8094         void *hdrs_v;
8095         uint16_t tci_m;
8096         uint16_t tci_v;
8097
8098         if (inner) {
8099                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8100                                          inner_headers);
8101                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8102         } else {
8103                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8104                                          outer_headers);
8105                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8106                 /*
8107                  * This is a workaround: masks are not supported
8108                  * and have been pre-validated.
8109                  */
8110                 if (vlan_v)
8111                         dev_flow->handle->vf_vlan.tag =
8112                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8113         }
8114         /*
8115          * When a VLAN item exists in the flow, mark the packet as
8116          * tagged, even if TCI is not specified.
8117          */
8118         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8119                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8120                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8121         }
8122         if (!vlan_v)
8123                 return;
8124         if (!vlan_m)
8125                 vlan_m = &rte_flow_item_vlan_mask;
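             /*
              * TCI layout: PCP (bits 15-13), DEI/CFI (bit 12), VID (bits
              * 11-0); hence the shifts by 12 and 13 below. MLX5_SET()
              * masks each value to its field width.
              */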
8126         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8127         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8128         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8129         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8130         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8131         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8132         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8133         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8134         /*
8135          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8136          * ethertype, and use ip_version field instead.
8137          */
8138         if (vlan_m->inner_type == 0xFFFF) {
8139                 switch (vlan_v->inner_type) {
8140                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8141                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8142                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8143                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8144                         return;
8145                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8146                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8147                         return;
8148                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8149                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8150                         return;
8151                 default:
8152                         break;
8153                 }
8154         }
8155         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8156                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8157                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8158                 /* Only one vlan_tag bit can be set. */
8159                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8160                 return;
8161         }
8162         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8163                  rte_be_to_cpu_16(vlan_m->inner_type));
8164         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8165                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8166 }
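
     /*
      * Illustrative note: in the translation above, cvlan_tag alone
      * matches a single-tagged packet, svlan_tag is set when another VLAN
      * tag follows (QinQ), and only one of the two value bits may be set
      * at a time, which is why cvlan_tag is cleared whenever svlan_tag is
      * set.
      */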
8167
8168 /**
8169  * Add IPV4 item to matcher and to the value.
8170  *
8171  * @param[in, out] matcher
8172  *   Flow matcher.
8173  * @param[in, out] key
8174  *   Flow matcher value.
8175  * @param[in] item
8176  *   Flow pattern to translate.
8177  * @param[in] inner
8178  *   Item is inner pattern.
8179  * @param[in] group
8180  *   The group to insert the rule.
8181  */
8182 static void
8183 flow_dv_translate_item_ipv4(void *matcher, void *key,
8184                             const struct rte_flow_item *item,
8185                             int inner, uint32_t group)
8186 {
8187         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8188         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8189         const struct rte_flow_item_ipv4 nic_mask = {
8190                 .hdr = {
8191                         .src_addr = RTE_BE32(0xffffffff),
8192                         .dst_addr = RTE_BE32(0xffffffff),
8193                         .type_of_service = 0xff,
8194                         .next_proto_id = 0xff,
8195                         .time_to_live = 0xff,
8196                 },
8197         };
8198         void *headers_m;
8199         void *headers_v;
8200         char *l24_m;
8201         char *l24_v;
8202         uint8_t tos;
8203
8204         if (inner) {
8205                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8206                                          inner_headers);
8207                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8208         } else {
8209                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8210                                          outer_headers);
8211                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8212         }
8213         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8214         if (!ipv4_v)
8215                 return;
8216         if (!ipv4_m)
8217                 ipv4_m = &nic_mask;
8218         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8219                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8220         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8221                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8222         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8223         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8224         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8225                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
8226         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8227                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
8228         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8229         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8230         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8231         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8232                  ipv4_m->hdr.type_of_service);
8233         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8234         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8235                  ipv4_m->hdr.type_of_service >> 2);
8236         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8237         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8238                  ipv4_m->hdr.next_proto_id);
8239         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8240                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8241         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8242                  ipv4_m->hdr.time_to_live);
8243         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8244                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
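        /*
         * HW exposes a single "frag" bit, so matching on
         * fragment_offset collapses to "is a fragment" vs
         * "is not a fragment".
         */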
8245         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8246                  !!(ipv4_m->hdr.fragment_offset));
8247         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8248                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8249 }
8250
8251 /**
8252  * Add IPV6 item to matcher and to the value.
8253  *
8254  * @param[in, out] matcher
8255  *   Flow matcher.
8256  * @param[in, out] key
8257  *   Flow matcher value.
8258  * @param[in] item
8259  *   Flow pattern to translate.
8260  * @param[in] inner
8261  *   Item is inner pattern.
8262  * @param[in] group
8263  *   The group to insert the rule.
8264  */
8265 static void
8266 flow_dv_translate_item_ipv6(void *matcher, void *key,
8267                             const struct rte_flow_item *item,
8268                             int inner, uint32_t group)
8269 {
8270         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8271         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8272         const struct rte_flow_item_ipv6 nic_mask = {
8273                 .hdr = {
8274                         .src_addr =
8275                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8276                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8277                         .dst_addr =
8278                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8279                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8280                         .vtc_flow = RTE_BE32(0xffffffff),
8281                         .proto = 0xff,
8282                         .hop_limits = 0xff,
8283                 },
8284         };
8285         void *headers_m;
8286         void *headers_v;
8287         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8288         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8289         char *l24_m;
8290         char *l24_v;
8291         uint32_t vtc_m;
8292         uint32_t vtc_v;
8293         int i;
8294         int size;
8295
8296         if (inner) {
8297                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8298                                          inner_headers);
8299                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8300         } else {
8301                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8302                                          outer_headers);
8303                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8304         }
8305         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8306         if (!ipv6_v)
8307                 return;
8308         if (!ipv6_m)
8309                 ipv6_m = &nic_mask;
8310         size = sizeof(ipv6_m->hdr.dst_addr);
8311         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8312                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8313         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8314                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8315         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8316         for (i = 0; i < size; ++i)
8317                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8318         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8319                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8320         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8321                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8322         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8323         for (i = 0; i < size; ++i)
8324                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8325         /* TOS. */
8326         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8327         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
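        /*
         * vtc_flow layout (host order): version in bits 31:28, traffic
         * class in bits 27:20 (DSCP 27:22, ECN 21:20), flow label in
         * bits 19:0 - hence the shifts below.
         */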
8328         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8329         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8330         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8331         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8332         /* Label. */
8333         if (inner) {
8334                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8335                          vtc_m);
8336                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8337                          vtc_v);
8338         } else {
8339                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8340                          vtc_m);
8341                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8342                          vtc_v);
8343         }
8344         /* Protocol. */
8345         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8346                  ipv6_m->hdr.proto);
8347         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8348                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8349         /* Hop limit. */
8350         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8351                  ipv6_m->hdr.hop_limits);
8352         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8353                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8354         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8355                  !!(ipv6_m->has_frag_ext));
8356         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8357                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8358 }
8359
8360 /**
8361  * Add IPV6 fragment extension item to matcher and to the value.
8362  *
8363  * @param[in, out] matcher
8364  *   Flow matcher.
8365  * @param[in, out] key
8366  *   Flow matcher value.
8367  * @param[in] item
8368  *   Flow pattern to translate.
8369  * @param[in] inner
8370  *   Item is inner pattern.
8371  */
8372 static void
8373 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8374                                      const struct rte_flow_item *item,
8375                                      int inner)
8376 {
8377         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8378         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8379         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8380                 .hdr = {
8381                         .next_header = 0xff,
8382                         .frag_data = RTE_BE16(0xffff),
8383                 },
8384         };
8385         void *headers_m;
8386         void *headers_v;
8387
8388         if (inner) {
8389                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8390                                          inner_headers);
8391                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8392         } else {
8393                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8394                                          outer_headers);
8395                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8396         }
8397         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8398         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8399         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8400         if (!ipv6_frag_ext_v)
8401                 return;
8402         if (!ipv6_frag_ext_m)
8403                 ipv6_frag_ext_m = &nic_mask;
8404         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8405                  ipv6_frag_ext_m->hdr.next_header);
8406         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8407                  ipv6_frag_ext_v->hdr.next_header &
8408                  ipv6_frag_ext_m->hdr.next_header);
8409 }
8410
8411 /**
8412  * Add TCP item to matcher and to the value.
8413  *
8414  * @param[in, out] matcher
8415  *   Flow matcher.
8416  * @param[in, out] key
8417  *   Flow matcher value.
8418  * @param[in] item
8419  *   Flow pattern to translate.
8420  * @param[in] inner
8421  *   Item is inner pattern.
8422  */
8423 static void
8424 flow_dv_translate_item_tcp(void *matcher, void *key,
8425                            const struct rte_flow_item *item,
8426                            int inner)
8427 {
8428         const struct rte_flow_item_tcp *tcp_m = item->mask;
8429         const struct rte_flow_item_tcp *tcp_v = item->spec;
8430         void *headers_m;
8431         void *headers_v;
8432
8433         if (inner) {
8434                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8435                                          inner_headers);
8436                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8437         } else {
8438                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8439                                          outer_headers);
8440                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8441         }
8442         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8443         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8444         if (!tcp_v)
8445                 return;
8446         if (!tcp_m)
8447                 tcp_m = &rte_flow_item_tcp_mask;
8448         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8449                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8450         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8451                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8452         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8453                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8454         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8455                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8456         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8457                  tcp_m->hdr.tcp_flags);
8458         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8459                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8460 }
8461
8462 /**
8463  * Add UDP item to matcher and to the value.
8464  *
8465  * @param[in, out] matcher
8466  *   Flow matcher.
8467  * @param[in, out] key
8468  *   Flow matcher value.
8469  * @param[in] item
8470  *   Flow pattern to translate.
8471  * @param[in] inner
8472  *   Item is inner pattern.
8473  */
8474 static void
8475 flow_dv_translate_item_udp(void *matcher, void *key,
8476                            const struct rte_flow_item *item,
8477                            int inner)
8478 {
8479         const struct rte_flow_item_udp *udp_m = item->mask;
8480         const struct rte_flow_item_udp *udp_v = item->spec;
8481         void *headers_m;
8482         void *headers_v;
8483
8484         if (inner) {
8485                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8486                                          inner_headers);
8487                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8488         } else {
8489                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8490                                          outer_headers);
8491                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8492         }
8493         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8494         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8495         if (!udp_v)
8496                 return;
8497         if (!udp_m)
8498                 udp_m = &rte_flow_item_udp_mask;
8499         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8500                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8501         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8502                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8503         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8504                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8505         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8506                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8507 }
8508
8509 /**
8510  * Add GRE optional Key item to matcher and to the value.
8511  *
8512  * @param[in, out] matcher
8513  *   Flow matcher.
8514  * @param[in, out] key
8515  *   Flow matcher value.
8516  * @param[in] item
8517  *   Flow pattern to translate.
8520  */
8521 static void
8522 flow_dv_translate_item_gre_key(void *matcher, void *key,
8523                                const struct rte_flow_item *item)
8524 {
8525         const rte_be32_t *key_m = item->mask;
8526         const rte_be32_t *key_v = item->spec;
8527         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8528         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8529         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8530
8531         /* GRE K bit must be on and should already be validated */
8532         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8533         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8534         if (!key_v)
8535                 return;
8536         if (!key_m)
8537                 key_m = &gre_key_default_mask;
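        /*
         * The 32-bit GRE key is split in the match parameters:
         * the upper 24 bits go to gre_key_h, the lower 8 to gre_key_l.
         */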
8538         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8539                  rte_be_to_cpu_32(*key_m) >> 8);
8540         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8541                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8542         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8543                  rte_be_to_cpu_32(*key_m) & 0xFF);
8544         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8545                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8546 }
8547
8548 /**
8549  * Add GRE item to matcher and to the value.
8550  *
8551  * @param[in, out] matcher
8552  *   Flow matcher.
8553  * @param[in, out] key
8554  *   Flow matcher value.
8555  * @param[in] item
8556  *   Flow pattern to translate.
8557  * @param[in] inner
8558  *   Item is inner pattern.
8559  */
8560 static void
8561 flow_dv_translate_item_gre(void *matcher, void *key,
8562                            const struct rte_flow_item *item,
8563                            int inner)
8564 {
8565         const struct rte_flow_item_gre *gre_m = item->mask;
8566         const struct rte_flow_item_gre *gre_v = item->spec;
8567         void *headers_m;
8568         void *headers_v;
8569         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8570         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
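        /*
         * Overlay for the GRE c_rsvd0_ver field once byte-swapped to
         * CPU order, making the C, K and S flag bits directly
         * addressable (relies on little-endian bit-field layout).
         */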
8571         struct {
8572                 union {
8573                         __extension__
8574                         struct {
8575                                 uint16_t version:3;
8576                                 uint16_t rsvd0:9;
8577                                 uint16_t s_present:1;
8578                                 uint16_t k_present:1;
8579                                 uint16_t rsvd_bit1:1;
8580                                 uint16_t c_present:1;
8581                         };
8582                         uint16_t value;
8583                 };
8584         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8585
8586         if (inner) {
8587                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8588                                          inner_headers);
8589                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8590         } else {
8591                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8592                                          outer_headers);
8593                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8594         }
8595         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8596         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8597         if (!gre_v)
8598                 return;
8599         if (!gre_m)
8600                 gre_m = &rte_flow_item_gre_mask;
8601         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8602                  rte_be_to_cpu_16(gre_m->protocol));
8603         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8604                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8605         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8606         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8607         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8608                  gre_crks_rsvd0_ver_m.c_present);
8609         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8610                  gre_crks_rsvd0_ver_v.c_present &
8611                  gre_crks_rsvd0_ver_m.c_present);
8612         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8613                  gre_crks_rsvd0_ver_m.k_present);
8614         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8615                  gre_crks_rsvd0_ver_v.k_present &
8616                  gre_crks_rsvd0_ver_m.k_present);
8617         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8618                  gre_crks_rsvd0_ver_m.s_present);
8619         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8620                  gre_crks_rsvd0_ver_v.s_present &
8621                  gre_crks_rsvd0_ver_m.s_present);
8622 }
8623
8624 /**
8625  * Add NVGRE item to matcher and to the value.
8626  *
8627  * @param[in, out] matcher
8628  *   Flow matcher.
8629  * @param[in, out] key
8630  *   Flow matcher value.
8631  * @param[in] item
8632  *   Flow pattern to translate.
8633  * @param[in] inner
8634  *   Item is inner pattern.
8635  */
8636 static void
8637 flow_dv_translate_item_nvgre(void *matcher, void *key,
8638                              const struct rte_flow_item *item,
8639                              int inner)
8640 {
8641         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8642         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8643         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8644         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8645         const char *tni_flow_id_m;
8646         const char *tni_flow_id_v;
8647         char *gre_key_m;
8648         char *gre_key_v;
8649         int size;
8650         int i;
8651
8652         /* For NVGRE, GRE header fields must be set with defined values. */
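        /*
         * In c_rsvd0_ver, 0x2000 sets the K (key present) bit mandated
         * by NVGRE, and the 0xB000 mask covers the C, K and S flag bits.
         */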
8653         const struct rte_flow_item_gre gre_spec = {
8654                 .c_rsvd0_ver = RTE_BE16(0x2000),
8655                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8656         };
8657         const struct rte_flow_item_gre gre_mask = {
8658                 .c_rsvd0_ver = RTE_BE16(0xB000),
8659                 .protocol = RTE_BE16(UINT16_MAX),
8660         };
8661         const struct rte_flow_item gre_item = {
8662                 .spec = &gre_spec,
8663                 .mask = &gre_mask,
8664                 .last = NULL,
8665         };
8666         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8667         if (!nvgre_v)
8668                 return;
8669         if (!nvgre_m)
8670                 nvgre_m = &rte_flow_item_nvgre_mask;
8671         tni_flow_id_m = (const char *)nvgre_m->tni;
8672         tni_flow_id_v = (const char *)nvgre_v->tni;
8673         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
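        /*
         * The 24-bit TNI and the 8-bit flow_id together occupy the
         * 32-bit GRE key field, so both are copied in a single pass.
         */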
8674         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8675         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8676         memcpy(gre_key_m, tni_flow_id_m, size);
8677         for (i = 0; i < size; ++i)
8678                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8679 }
8680
8681 /**
8682  * Add VXLAN item to matcher and to the value.
8683  *
8684  * @param[in, out] matcher
8685  *   Flow matcher.
8686  * @param[in, out] key
8687  *   Flow matcher value.
8688  * @param[in] item
8689  *   Flow pattern to translate.
8690  * @param[in] inner
8691  *   Item is inner pattern.
8692  */
8693 static void
8694 flow_dv_translate_item_vxlan(void *matcher, void *key,
8695                              const struct rte_flow_item *item,
8696                              int inner)
8697 {
8698         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8699         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8700         void *headers_m;
8701         void *headers_v;
8702         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8703         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8704         char *vni_m;
8705         char *vni_v;
8706         uint16_t dport;
8707         int size;
8708         int i;
8709
8710         if (inner) {
8711                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8712                                          inner_headers);
8713                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8714         } else {
8715                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8716                                          outer_headers);
8717                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8718         }
8719         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8720                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
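        /*
         * If the pattern does not already match on the UDP destination
         * port, match the well-known VXLAN/VXLAN-GPE port implicitly.
         */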
8721         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8722                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8723                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8724         }
8725         if (!vxlan_v)
8726                 return;
8727         if (!vxlan_m)
8728                 vxlan_m = &rte_flow_item_vxlan_mask;
8729         size = sizeof(vxlan_m->vni);
8730         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8731         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8732         memcpy(vni_m, vxlan_m->vni, size);
8733         for (i = 0; i < size; ++i)
8734                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8735 }
8736
8737 /**
8738  * Add VXLAN-GPE item to matcher and to the value.
8739  *
8740  * @param[in, out] matcher
8741  *   Flow matcher.
8742  * @param[in, out] key
8743  *   Flow matcher value.
8744  * @param[in] item
8745  *   Flow pattern to translate.
8746  * @param[in] inner
8747  *   Item is inner pattern.
8748  */
8750 static void
8751 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8752                                  const struct rte_flow_item *item, int inner)
8753 {
8754         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8755         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8756         void *headers_m;
8757         void *headers_v;
8758         void *misc_m =
8759                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8760         void *misc_v =
8761                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8762         char *vni_m;
8763         char *vni_v;
8764         uint16_t dport;
8765         int size;
8766         int i;
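        /*
         * Default VXLAN-GPE flags; 0x0c is assumed to set the "I" (VNI
         * valid) and "P" (next protocol present) bits.
         */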
8767         uint8_t flags_m = 0xff;
8768         uint8_t flags_v = 0xc;
8769
8770         if (inner) {
8771                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8772                                          inner_headers);
8773                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8774         } else {
8775                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8776                                          outer_headers);
8777                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8778         }
8779         /* This translator only handles VXLAN-GPE, so the GPE port applies. */
8780         dport = MLX5_UDP_PORT_VXLAN_GPE;
8781         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8782                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8783                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8784         }
8785         if (!vxlan_v)
8786                 return;
8787         if (!vxlan_m)
8788                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8789         size = sizeof(vxlan_m->vni);
8790         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8791         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8792         memcpy(vni_m, vxlan_m->vni, size);
8793         for (i = 0; i < size; ++i)
8794                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8795         if (vxlan_m->flags) {
8796                 flags_m = vxlan_m->flags;
8797                 flags_v = vxlan_v->flags;
8798         }
8799         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8800         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8801         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8802                  vxlan_m->protocol);
8803         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8804                  vxlan_v->protocol);
8805 }
8806
8807 /**
8808  * Add Geneve item to matcher and to the value.
8809  *
8810  * @param[in, out] matcher
8811  *   Flow matcher.
8812  * @param[in, out] key
8813  *   Flow matcher value.
8814  * @param[in] item
8815  *   Flow pattern to translate.
8816  * @param[in] inner
8817  *   Item is inner pattern.
8818  */
8820 static void
8821 flow_dv_translate_item_geneve(void *matcher, void *key,
8822                               const struct rte_flow_item *item, int inner)
8823 {
8824         const struct rte_flow_item_geneve *geneve_m = item->mask;
8825         const struct rte_flow_item_geneve *geneve_v = item->spec;
8826         void *headers_m;
8827         void *headers_v;
8828         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8829         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8830         uint16_t dport;
8831         uint16_t gbhdr_m;
8832         uint16_t gbhdr_v;
8833         char *vni_m;
8834         char *vni_v;
8835         size_t size, i;
8836
8837         if (inner) {
8838                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8839                                          inner_headers);
8840                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8841         } else {
8842                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8843                                          outer_headers);
8844                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8845         }
8846         dport = MLX5_UDP_PORT_GENEVE;
8847         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8848                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8849                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8850         }
8851         if (!geneve_v)
8852                 return;
8853         if (!geneve_m)
8854                 geneve_m = &rte_flow_item_geneve_mask;
8855         size = sizeof(geneve_m->vni);
8856         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8857         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8858         memcpy(vni_m, geneve_m->vni, size);
8859         for (i = 0; i < size; ++i)
8860                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8861         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8862                  rte_be_to_cpu_16(geneve_m->protocol));
8863         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8864                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8865         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8866         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
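        /*
         * ver_opt_len_o_c_rsvd0 packs version, option length and the
         * OAM/critical flags; the macros below extract the OAM bit and
         * the 6-bit option length.
         */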
8867         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8868                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8869         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8870                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8871         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8872                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8873         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8874                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8875                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8876 }
8877
8878 /**
8879  * Create Geneve TLV option resource.
8880  *
8881  * @param[in, out] dev
8882  *   Pointer to rte_eth_dev structure.
8883  * @param[in] item
8884  *   Flow pattern holding the GENEVE TLV option to register; class,
8885  *   type and length are taken from item->spec.
8886  * @param[out] error
8887  *   Pointer to error structure.
8889  *
8890  * @return
8891  *   0 on success, otherwise -errno and errno is set.
8892  */
8894 int
8895 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8896                                              const struct rte_flow_item *item,
8897                                              struct rte_flow_error *error)
8898 {
8899         struct mlx5_priv *priv = dev->data->dev_private;
8900         struct mlx5_dev_ctx_shared *sh = priv->sh;
8901         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8902                         sh->geneve_tlv_option_resource;
8903         struct mlx5_devx_obj *obj;
8904         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8905         int ret = 0;
8906
8907         if (!geneve_opt_v)
8908                 return -1;
8909         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
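        /*
         * A single TLV option object is shared device-wide: reuse it
         * (bumping the refcount) when class/type/length all match,
         * fail otherwise.
         */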
8910         if (geneve_opt_resource != NULL) {
8911                 if (geneve_opt_resource->option_class ==
8912                         geneve_opt_v->option_class &&
8913                         geneve_opt_resource->option_type ==
8914                         geneve_opt_v->option_type &&
8915                         geneve_opt_resource->length ==
8916                         geneve_opt_v->option_len) {
8917                         /* We already have GENEVE TLV option obj allocated. */
8918                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8919                                            __ATOMIC_RELAXED);
8920                 } else {
8921                         ret = rte_flow_error_set(error, ENOMEM,
8922                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8923                                 "Only one GENEVE TLV option supported");
8924                         goto exit;
8925                 }
8926         } else {
8927                 /* Create a GENEVE TLV object and resource. */
8928                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8929                                 geneve_opt_v->option_class,
8930                                 geneve_opt_v->option_type,
8931                                 geneve_opt_v->option_len);
8932                 if (!obj) {
8933                         ret = rte_flow_error_set(error, ENODATA,
8934                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8935                                 "Failed to create GENEVE TLV Devx object");
8936                         goto exit;
8937                 }
8938                 sh->geneve_tlv_option_resource =
8939                                 mlx5_malloc(MLX5_MEM_ZERO,
8940                                                 sizeof(*geneve_opt_resource),
8941                                                 0, SOCKET_ID_ANY);
8942                 if (!sh->geneve_tlv_option_resource) {
8943                         claim_zero(mlx5_devx_cmd_destroy(obj));
8944                         ret = rte_flow_error_set(error, ENOMEM,
8945                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8946                                 "GENEVE TLV object memory allocation failed");
8947                         goto exit;
8948                 }
8949                 geneve_opt_resource = sh->geneve_tlv_option_resource;
8950                 geneve_opt_resource->obj = obj;
8951                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
8952                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
8953                 geneve_opt_resource->length = geneve_opt_v->option_len;
8954                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
8955                                 __ATOMIC_RELAXED);
8956         }
8957 exit:
8958         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8959         return ret;
8960 }
8961
8962 /**
8963  * Add Geneve TLV option item to matcher.
8964  *
8965  * @param[in, out] dev
8966  *   Pointer to rte_eth_dev structure.
8967  * @param[in, out] matcher
8968  *   Flow matcher.
8969  * @param[in, out] key
8970  *   Flow matcher value.
8971  * @param[in] item
8972  *   Flow pattern to translate.
8973  * @param[out] error
8974  *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
8975  */
8976 static int
8977 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8978                                   void *key, const struct rte_flow_item *item,
8979                                   struct rte_flow_error *error)
8980 {
8981         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8982         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8983         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8984         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8985         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8986                         misc_parameters_3);
8987         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8988         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8989         int ret = 0;
8990
8991         if (!geneve_opt_v)
8992                 return -1;
8993         if (!geneve_opt_m)
8994                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8995         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8996                                                            error);
8997         if (ret) {
8998                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8999                 return ret;
9000         }
9001         /*
9002          * Set the option length in the GENEVE header if not requested.
9003          * The GENEVE TLV option length is expressed by the option length
9004          * field in the GENEVE header.
9005          * If the option length was not requested but a GENEVE TLV option
9006          * item is present, set the option length field implicitly.
9007          */
9008         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
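                /*
                 * option_len counts 4-byte data words; the extra 1 is
                 * assumed to cover the 4-byte option header included in
                 * the GENEVE header's option length field.
                 */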
9009                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9010                          MLX5_GENEVE_OPTLEN_MASK);
9011                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9012                          geneve_opt_v->option_len + 1);
9013         }
9014         /* Set the data. */
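        /*
         * Only the first 32-bit word of option data can be matched
         * (geneve_tlv_option_0_data), hence the RTE_MIN() clamps below.
         */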
9015         if (geneve_opt_v->data) {
9016                 memcpy(&opt_data_key, geneve_opt_v->data,
9017                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9018                                 sizeof(opt_data_key)));
9019                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9020                                 sizeof(opt_data_key));
9021                 memcpy(&opt_data_mask, geneve_opt_m->data,
9022                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9023                                 sizeof(opt_data_mask)));
9024                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9025                                 sizeof(opt_data_mask));
9026                 MLX5_SET(fte_match_set_misc3, misc3_m,
9027                                 geneve_tlv_option_0_data,
9028                                 rte_be_to_cpu_32(opt_data_mask));
9029                 MLX5_SET(fte_match_set_misc3, misc3_v,
9030                                 geneve_tlv_option_0_data,
9031                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9032         }
9033         return ret;
9034 }
9035
9036 /**
9037  * Add MPLS item to matcher and to the value.
9038  *
9039  * @param[in, out] matcher
9040  *   Flow matcher.
9041  * @param[in, out] key
9042  *   Flow matcher value.
9043  * @param[in] item
9044  *   Flow pattern to translate.
9045  * @param[in] prev_layer
9046  *   The protocol layer indicated in previous item.
9047  * @param[in] inner
9048  *   Item is inner pattern.
9049  */
9050 static void
9051 flow_dv_translate_item_mpls(void *matcher, void *key,
9052                             const struct rte_flow_item *item,
9053                             uint64_t prev_layer,
9054                             int inner)
9055 {
9056         const uint32_t *in_mpls_m = item->mask;
9057         const uint32_t *in_mpls_v = item->spec;
9058         uint32_t *out_mpls_m = NULL;
9059         uint32_t *out_mpls_v = NULL;
9060         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9061         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9062         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9063                                      misc_parameters_2);
9064         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9065         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9066         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9067
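        /*
         * Implicitly match the carrying protocol first: UDP destination
         * port 6635 for MPLS-over-UDP (RFC 7510), or GRE protocol
         * 0x8847 for MPLS-over-GRE, depending on the previous layer.
         */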
9068         switch (prev_layer) {
9069         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9070                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9071                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9072                          MLX5_UDP_PORT_MPLS);
9073                 break;
9074         case MLX5_FLOW_LAYER_GRE:
9075                 /* Fall-through. */
9076         case MLX5_FLOW_LAYER_GRE_KEY:
9077                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9078                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9079                          RTE_ETHER_TYPE_MPLS);
9080                 break;
9081         default:
9082                 break;
9083         }
9084         if (!in_mpls_v)
9085                 return;
9086         if (!in_mpls_m)
9087                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9088         switch (prev_layer) {
9089         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9090                 out_mpls_m =
9091                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9092                                                  outer_first_mpls_over_udp);
9093                 out_mpls_v =
9094                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9095                                                  outer_first_mpls_over_udp);
9096                 break;
9097         case MLX5_FLOW_LAYER_GRE:
9098                 out_mpls_m =
9099                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9100                                                  outer_first_mpls_over_gre);
9101                 out_mpls_v =
9102                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9103                                                  outer_first_mpls_over_gre);
9104                 break;
9105         default:
9106                 /* Inner MPLS not over GRE is not supported. */
9107                 if (!inner) {
9108                         out_mpls_m =
9109                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9110                                                          misc2_m,
9111                                                          outer_first_mpls);
9112                         out_mpls_v =
9113                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9114                                                          misc2_v,
9115                                                          outer_first_mpls);
9116                 }
9117                 break;
9118         }
9119         if (out_mpls_m && out_mpls_v) {
9120                 *out_mpls_m = *in_mpls_m;
9121                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9122         }
9123 }
9124
9125 /**
9126  * Add metadata register item to matcher
9127  *
9128  * @param[in, out] matcher
9129  *   Flow matcher.
9130  * @param[in, out] key
9131  *   Flow matcher value.
9132  * @param[in] reg_type
9133  *   Type of device metadata register
9134  * @param[in] value
9135  *   Register value
9136  * @param[in] mask
9137  *   Register mask
9138  */
9139 static void
9140 flow_dv_match_meta_reg(void *matcher, void *key,
9141                        enum modify_reg reg_type,
9142                        uint32_t data, uint32_t mask)
9143 {
9144         void *misc2_m =
9145                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9146         void *misc2_v =
9147                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9148         uint32_t temp;
9149
9150         data &= mask;
9151         switch (reg_type) {
9152         case REG_A:
9153                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9154                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9155                 break;
9156         case REG_B:
9157                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9158                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9159                 break;
9160         case REG_C_0:
9161                 /*
9162                  * The metadata register C0 field might be divided into
9163                  * source vport index and META item value, we should set
9164                  * this field according to specified mask, not as whole one.
9165                  */
9166                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9167                 temp |= mask;
9168                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9169                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9170                 temp &= ~mask;
9171                 temp |= data;
9172                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9173                 break;
9174         case REG_C_1:
9175                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9176                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9177                 break;
9178         case REG_C_2:
9179                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9180                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9181                 break;
9182         case REG_C_3:
9183                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9184                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9185                 break;
9186         case REG_C_4:
9187                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9188                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9189                 break;
9190         case REG_C_5:
9191                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9192                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9193                 break;
9194         case REG_C_6:
9195                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9196                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9197                 break;
9198         case REG_C_7:
9199                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9200                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9201                 break;
9202         default:
9203                 MLX5_ASSERT(false);
9204                 break;
9205         }
9206 }
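
/*
 * Example (illustrative sketch, not part of the original code): a caller
 * matching metadata register C1 against value 0x1234 with a full mask
 * would use:
 *     flow_dv_match_meta_reg(matcher, key, REG_C_1, 0x1234, UINT32_MAX);
 */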
9207
9208 /**
9209  * Add MARK item to matcher
9210  *
9211  * @param[in] dev
9212  *   The device to configure through.
9213  * @param[in, out] matcher
9214  *   Flow matcher.
9215  * @param[in, out] key
9216  *   Flow matcher value.
9217  * @param[in] item
9218  *   Flow pattern to translate.
9219  */
9220 static void
9221 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9222                             void *matcher, void *key,
9223                             const struct rte_flow_item *item)
9224 {
9225         struct mlx5_priv *priv = dev->data->dev_private;
9226         const struct rte_flow_item_mark *mark;
9227         uint32_t value;
9228         uint32_t mask;
9229
9230         mark = item->mask ? (const void *)item->mask :
9231                             &rte_flow_item_mark_mask;
9232         mask = mark->id & priv->sh->dv_mark_mask;
9233         mark = (const void *)item->spec;
9234         MLX5_ASSERT(mark);
9235         value = mark->id & priv->sh->dv_mark_mask & mask;
9236         if (mask) {
9237                 enum modify_reg reg;
9238
9239                 /* Get the metadata register index for the mark. */
9240                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9241                 MLX5_ASSERT(reg > 0);
9242                 if (reg == REG_C_0) {
9243                         struct mlx5_priv *priv = dev->data->dev_private;
9244                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9245                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9246
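                        /*
                         * REG_C_0 may be shared with the vport metadata:
                         * dv_regc0_mask marks the bits available for
                         * MARK, and rte_bsf32() gives the shift needed
                         * to align the value into those bits.
                         */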
9247                         mask &= msk_c0;
9248                         mask <<= shl_c0;
9249                         value <<= shl_c0;
9250                 }
9251                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9252         }
9253 }
9254
9255 /**
9256  * Add META item to matcher
9257  *
9258  * @param[in] dev
9259  *   The device to configure through.
9260  * @param[in, out] matcher
9261  *   Flow matcher.
9262  * @param[in, out] key
9263  *   Flow matcher value.
9264  * @param[in] attr
9265  *   Attributes of flow that includes this item.
9266  * @param[in] item
9267  *   Flow pattern to translate.
9268  */
9269 static void
9270 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9271                             void *matcher, void *key,
9272                             const struct rte_flow_attr *attr,
9273                             const struct rte_flow_item *item)
9274 {
9275         const struct rte_flow_item_meta *meta_m;
9276         const struct rte_flow_item_meta *meta_v;
9277
9278         meta_m = (const void *)item->mask;
9279         if (!meta_m)
9280                 meta_m = &rte_flow_item_meta_mask;
9281         meta_v = (const void *)item->spec;
9282         if (meta_v) {
9283                 int reg;
9284                 uint32_t value = meta_v->data;
9285                 uint32_t mask = meta_m->data;
9286
9287                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9288                 if (reg < 0)
9289                         return;
9290                 MLX5_ASSERT(reg != REG_NON);
9291                 if (reg == REG_C_0) {
9292                         struct mlx5_priv *priv = dev->data->dev_private;
9293                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9294                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9295
9296                         mask &= msk_c0;
9297                         mask <<= shl_c0;
9298                         value <<= shl_c0;
9299                 }
9300                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9301         }
9302 }
9303
9304 /**
9305  * Add vport metadata Reg C0 item to matcher
9306  *
9307  * @param[in, out] matcher
9308  *   Flow matcher.
9309  * @param[in, out] key
9310  *   Flow matcher value.
9311  * @param[in] value
9312  *   Vport metadata register value to match.
 * @param[in] mask
 *   Vport metadata register mask.
9313  */
9314 static void
9315 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9316                                   uint32_t value, uint32_t mask)
9317 {
9318         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9319 }
9320
9321 /**
9322  * Add tag item to matcher
9323  *
9324  * @param[in] dev
9325  *   The device to configure through.
9326  * @param[in, out] matcher
9327  *   Flow matcher.
9328  * @param[in, out] key
9329  *   Flow matcher value.
9330  * @param[in] item
9331  *   Flow pattern to translate.
9332  */
9333 static void
9334 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9335                                 void *matcher, void *key,
9336                                 const struct rte_flow_item *item)
9337 {
9338         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9339         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9340         uint32_t mask, value;
9341
9342         MLX5_ASSERT(tag_v);
9343         value = tag_v->data;
9344         mask = tag_m ? tag_m->data : UINT32_MAX;
9345         if (tag_v->id == REG_C_0) {
9346                 struct mlx5_priv *priv = dev->data->dev_private;
9347                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9348                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9349
9350                 mask &= msk_c0;
9351                 mask <<= shl_c0;
9352                 value <<= shl_c0;
9353         }
9354         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9355 }
9356
9357 /**
9358  * Add TAG item to matcher
9359  *
9360  * @param[in] dev
9361  *   The device to configure through.
9362  * @param[in, out] matcher
9363  *   Flow matcher.
9364  * @param[in, out] key
9365  *   Flow matcher value.
9366  * @param[in] item
9367  *   Flow pattern to translate.
9368  */
9369 static void
9370 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9371                            void *matcher, void *key,
9372                            const struct rte_flow_item *item)
9373 {
9374         const struct rte_flow_item_tag *tag_v = item->spec;
9375         const struct rte_flow_item_tag *tag_m = item->mask;
9376         enum modify_reg reg;
9377
9378         MLX5_ASSERT(tag_v);
9379         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9380         /* Get the metadata register index for the tag. */
9381         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9382         MLX5_ASSERT(reg > 0);
9383         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9384 }
9385
9386 /**
9387  * Add source vport match to the specified matcher.
9388  *
9389  * @param[in, out] matcher
9390  *   Flow matcher.
9391  * @param[in, out] key
9392  *   Flow matcher value.
9393  * @param[in] port
9394  *   Source vport value to match.
9395  * @param[in] mask
9396  *   Mask to apply.
9397  */
9398 static void
9399 flow_dv_translate_item_source_vport(void *matcher, void *key,
9400                                     int16_t port, uint16_t mask)
9401 {
9402         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9403         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9404
9405         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9406         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9407 }
9408
9409 /**
9410  * Translate port-id item to eswitch match on port-id.
9411  *
9412  * @param[in] dev
9413  *   The device to configure through.
9414  * @param[in, out] matcher
9415  *   Flow matcher.
9416  * @param[in, out] key
9417  *   Flow matcher value.
9418  * @param[in] item
9419  *   Flow pattern to translate.
9420  * @param[in] attr
9421  *   Flow attributes.
9422  *
9423  * @return
9424  *   0 on success, a negative errno value otherwise.
9425  */
9426 static int
9427 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9428                                void *key, const struct rte_flow_item *item,
9429                                const struct rte_flow_attr *attr)
9430 {
9431         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9432         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9433         struct mlx5_priv *priv;
9434         uint16_t mask, id;
9435
9436         mask = pid_m ? pid_m->id : 0xffff;
9437         id = pid_v ? pid_v->id : dev->data->port_id;
9438         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9439         if (!priv)
9440                 return -rte_errno;
9441         /*
9442          * Translate to vport field or to metadata, depending on mode.
9443          * Kernel can use either misc.source_port or half of C0 metadata
9444          * register.
9445          */
9446         if (priv->vport_meta_mask) {
9447                 /*
9448                  * Provide the hint for SW steering library
9449                  * to insert the flow into ingress domain and
9450                  * save the extra vport match.
9451                  */
9452                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9453                     priv->pf_bond < 0 && attr->transfer)
9454                         flow_dv_translate_item_source_vport
9455                                 (matcher, key, priv->vport_id, mask);
9456                 /*
9457                  * We should always set the vport metadata register,
9458                  * otherwise the SW steering library can drop
9459                  * the rule if the wire vport metadata value is not zero;
9460                  * this depends on the kernel configuration.
9461                  */
9462                 flow_dv_translate_item_meta_vport(matcher, key,
9463                                                   priv->vport_meta_tag,
9464                                                   priv->vport_meta_mask);
9465         } else {
9466                 flow_dv_translate_item_source_vport(matcher, key,
9467                                                     priv->vport_id, mask);
9468         }
9469         return 0;
9470 }
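/*
 * Illustrative pattern item consumed by the translation above. A sketch
 * only; the port ID value is hypothetical:
 *
 *	struct rte_flow_item_port_id pid = { .id = 1 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *		.spec = &pid,
 *	};
 *
 * With a NULL mask, the full 0xffff mask is applied as seen above.
 */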
9471
9472 /**
9473  * Add ICMP6 item to matcher and to the value.
9474  *
9475  * @param[in, out] matcher
9476  *   Flow matcher.
9477  * @param[in, out] key
9478  *   Flow matcher value.
9479  * @param[in] item
9480  *   Flow pattern to translate.
9481  * @param[in] inner
9482  *   Item is inner pattern.
9483  */
9484 static void
9485 flow_dv_translate_item_icmp6(void *matcher, void *key,
9486                               const struct rte_flow_item *item,
9487                               int inner)
9488 {
9489         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9490         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9491         void *headers_m;
9492         void *headers_v;
9493         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9494                                      misc_parameters_3);
9495         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9496         if (inner) {
9497                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9498                                          inner_headers);
9499                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9500         } else {
9501                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9502                                          outer_headers);
9503                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9504         }
9505         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9506         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9507         if (!icmp6_v)
9508                 return;
9509         if (!icmp6_m)
9510                 icmp6_m = &rte_flow_item_icmp6_mask;
9511         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9512         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9513                  icmp6_v->type & icmp6_m->type);
9514         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9515         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9516                  icmp6_v->code & icmp6_m->code);
9517 }
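/*
 * Illustrative item for the translation above. A sketch: matching
 * ICMPv6 Neighbor Solicitation messages (type 135, code 0), relying on
 * the default mask, which covers the type and code fields:
 *
 *	struct rte_flow_item_icmp6 spec = { .type = 135, .code = 0 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ICMP6,
 *		.spec = &spec,
 *	};
 */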
9518
9519 /**
9520  * Add ICMP item to matcher and to the value.
9521  *
9522  * @param[in, out] matcher
9523  *   Flow matcher.
9524  * @param[in, out] key
9525  *   Flow matcher value.
9526  * @param[in] item
9527  *   Flow pattern to translate.
9528  * @param[in] inner
9529  *   Item is inner pattern.
9530  */
9531 static void
9532 flow_dv_translate_item_icmp(void *matcher, void *key,
9533                             const struct rte_flow_item *item,
9534                             int inner)
9535 {
9536         const struct rte_flow_item_icmp *icmp_m = item->mask;
9537         const struct rte_flow_item_icmp *icmp_v = item->spec;
9538         uint32_t icmp_header_data_m = 0;
9539         uint32_t icmp_header_data_v = 0;
9540         void *headers_m;
9541         void *headers_v;
9542         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9543                                      misc_parameters_3);
9544         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9545         if (inner) {
9546                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9547                                          inner_headers);
9548                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9549         } else {
9550                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9551                                          outer_headers);
9552                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9553         }
9554         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9555         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9556         if (!icmp_v)
9557                 return;
9558         if (!icmp_m)
9559                 icmp_m = &rte_flow_item_icmp_mask;
9560         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9561                  icmp_m->hdr.icmp_type);
9562         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9563                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9564         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9565                  icmp_m->hdr.icmp_code);
9566         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9567                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9568         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9569         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9570         if (icmp_header_data_m) {
9571                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9572                 icmp_header_data_v |=
9573                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9574                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9575                          icmp_header_data_m);
9576                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9577                          icmp_header_data_v & icmp_header_data_m);
9578         }
9579 }
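/*
 * Illustrative item for the translation above. A sketch: matching ICMP
 * echo requests (type 8) with a hypothetical identifier. The default
 * mask covers only type and code, so an explicit mask is needed; the
 * identifier and sequence number are folded into the icmp_header_data
 * dword as shown above:
 *
 *	struct rte_flow_item_icmp spec = {
 *		.hdr = { .icmp_type = 8, .icmp_ident = RTE_BE16(0x2a) },
 *	};
 *	struct rte_flow_item_icmp mask = {
 *		.hdr = { .icmp_type = 0xff, .icmp_ident = RTE_BE16(0xffff) },
 *	};
 */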
9580
9581 /**
9582  * Add GTP item to matcher and to the value.
9583  *
9584  * @param[in, out] matcher
9585  *   Flow matcher.
9586  * @param[in, out] key
9587  *   Flow matcher value.
9588  * @param[in] item
9589  *   Flow pattern to translate.
9590  * @param[in] inner
9591  *   Item is inner pattern.
9592  */
9593 static void
9594 flow_dv_translate_item_gtp(void *matcher, void *key,
9595                            const struct rte_flow_item *item, int inner)
9596 {
9597         const struct rte_flow_item_gtp *gtp_m = item->mask;
9598         const struct rte_flow_item_gtp *gtp_v = item->spec;
9599         void *headers_m;
9600         void *headers_v;
9601         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9602                                      misc_parameters_3);
9603         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9604         uint16_t dport = RTE_GTPU_UDP_PORT;
9605
9606         if (inner) {
9607                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9608                                          inner_headers);
9609                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9610         } else {
9611                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9612                                          outer_headers);
9613                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9614         }
9615         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9616                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9617                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9618         }
9619         if (!gtp_v)
9620                 return;
9621         if (!gtp_m)
9622                 gtp_m = &rte_flow_item_gtp_mask;
9623         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9624                  gtp_m->v_pt_rsv_flags);
9625         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9626                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9627         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9628         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9629                  gtp_v->msg_type & gtp_m->msg_type);
9630         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9631                  rte_be_to_cpu_32(gtp_m->teid));
9632         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9633                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9634 }
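/*
 * Illustrative item for the translation above. A sketch: matching a
 * GTP-U tunnel by TEID only, relying on the default mask (which covers
 * just the TEID field). The TEID value is hypothetical:
 *
 *	struct rte_flow_item_gtp spec = {
 *		.teid = RTE_BE32(0x12345678),
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GTP,
 *		.spec = &spec,
 *	};
 */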
9635
9636 /**
9637  * Add GTP PSC item to matcher.
9638  *
9639  * @param[in, out] matcher
9640  *   Flow matcher.
9641  * @param[in, out] key
9642  *   Flow matcher value.
9643  * @param[in] item
9644  *   Flow pattern to translate.
9645  */
9646 static int
9647 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9648                                const struct rte_flow_item *item)
9649 {
9650         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9651         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9652         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9653                         misc_parameters_3);
9654         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9655         union {
9656                 uint32_t w32;
9657                 struct {
9658                         uint16_t seq_num;
9659                         uint8_t npdu_num;
9660                         uint8_t next_ext_header_type;
9661                 };
9662         } dw_2;
9663         uint8_t gtp_flags;
9664
9665         /* Always set E-flag match to 1, regardless of GTP item settings. */
9666         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9667         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9668         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9669         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9670         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9671         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9672         /* Set next extension header type (0x85 = PDU Session Container). */
9673         dw_2.seq_num = 0;
9674         dw_2.npdu_num = 0;
9675         dw_2.next_ext_header_type = 0xff;
9676         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9677                  rte_cpu_to_be_32(dw_2.w32));
9678         dw_2.seq_num = 0;
9679         dw_2.npdu_num = 0;
9680         dw_2.next_ext_header_type = 0x85;
9681         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9682                  rte_cpu_to_be_32(dw_2.w32));
9683         if (gtp_psc_v) {
9684                 union {
9685                         uint32_t w32;
9686                         struct {
9687                                 uint8_t len;
9688                                 uint8_t type_flags;
9689                                 uint8_t qfi;
9690                                 uint8_t reserved;
9691                         };
9692                 } dw_0;
9693
9694                 /* Set extension header PDU type and QoS. */
9695                 if (!gtp_psc_m)
9696                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9697                 dw_0.w32 = 0;
9698                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9699                 dw_0.qfi = gtp_psc_m->qfi;
9700                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9701                          rte_cpu_to_be_32(dw_0.w32));
9702                 dw_0.w32 = 0;
9703                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9704                                                         gtp_psc_m->pdu_type);
9705                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9706                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9707                          rte_cpu_to_be_32(dw_0.w32));
9708         }
9709         return 0;
9710 }
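/*
 * Illustrative item for the translation above. A sketch with
 * hypothetical values: matching a PDU Session Container with QFI 9 and
 * downlink PDU type (0):
 *
 *	struct rte_flow_item_gtp_psc spec = { .pdu_type = 0, .qfi = 9 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
 *		.spec = &spec,
 *	};
 *
 * Even with a NULL spec, the translation forces matching on the GTP
 * E-flag and on extension header type 0x85.
 */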
9711
9712 /**
9713  * Add eCPRI item to matcher and to the value.
9714  *
9715  * @param[in] dev
9716  *   The device to configure through.
9717  * @param[in, out] matcher
9718  *   Flow matcher.
9719  * @param[in, out] key
9720  *   Flow matcher value.
9721  * @param[in] item
9722  *   Flow pattern to translate.
9725  */
9726 static void
9727 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9728                              void *key, const struct rte_flow_item *item)
9729 {
9730         struct mlx5_priv *priv = dev->data->dev_private;
9731         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9732         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9733         struct rte_ecpri_common_hdr common;
9734         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9735                                      misc_parameters_4);
9736         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9737         uint32_t *samples;
9738         void *dw_m;
9739         void *dw_v;
9740
9741         if (!ecpri_v)
9742                 return;
9743         if (!ecpri_m)
9744                 ecpri_m = &rte_flow_item_ecpri_mask;
9745         /*
9746          * At most four DW samples are supported in a single matching now.
9747          * Two are used now for eCPRI matching:
9748          * 1. Type: one byte, the mask should be 0x00ff0000 in network order.
9749          * 2. ID of a message: one or two bytes, the mask is 0xffff0000 or
9750          *    0xff000000, if any.
9751          */
9752         if (!ecpri_m->hdr.common.u32)
9753                 return;
9754         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9755         /* Need to take the whole DW as the mask to fill the entry. */
9756         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9757                             prog_sample_field_value_0);
9758         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9759                             prog_sample_field_value_0);
9760         /* Already big endian (network order) in the header. */
9761         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9762         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9763         /* Sample#0, used for matching type, offset 0. */
9764         MLX5_SET(fte_match_set_misc4, misc4_m,
9765                  prog_sample_field_id_0, samples[0]);
9766         /* It makes no sense to set the sample ID in the mask field. */
9767         MLX5_SET(fte_match_set_misc4, misc4_v,
9768                  prog_sample_field_id_0, samples[0]);
9769         /*
9770          * Check whether the message body part needs to be matched.
9771          * Wildcard rules matching only the type field should be supported.
9772          */
9773         if (ecpri_m->hdr.dummy[0]) {
9774                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9775                 switch (common.type) {
9776                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9777                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9778                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9779                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9780                                             prog_sample_field_value_1);
9781                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9782                                             prog_sample_field_value_1);
9783                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9784                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9785                                             ecpri_m->hdr.dummy[0];
9786                         /* Sample#1, to match message body, offset 4. */
9787                         MLX5_SET(fte_match_set_misc4, misc4_m,
9788                                  prog_sample_field_id_1, samples[1]);
9789                         MLX5_SET(fte_match_set_misc4, misc4_v,
9790                                  prog_sample_field_id_1, samples[1]);
9791                         break;
9792                 default:
9793                         /* Others, do not match any sample ID. */
9794                         break;
9795                 }
9796         }
9797 }
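/*
 * Illustrative item for the translation above. A sketch: matching only
 * the eCPRI message type, using the 0x00ff0000 network-order mask noted
 * in the comment above:
 *
 *	struct rte_flow_item_ecpri spec = {
 *		.hdr.common.u32 = RTE_BE32(RTE_ECPRI_MSG_TYPE_RTC_CTRL << 16),
 *	};
 *	struct rte_flow_item_ecpri mask = {
 *		.hdr.common.u32 = RTE_BE32(0x00ff0000),
 *	};
 */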
9798
9799 /**
9800  * Add connection tracking status item to the matcher.
9801  *
9802  * @param[in] dev
9803  *   The device to configure through.
9804  * @param[in, out] matcher
9805  *   Flow matcher.
9806  * @param[in, out] key
9807  *   Flow matcher value.
9808  * @param[in] item
9809  *   Flow pattern to translate.
9810  */
9811 static void
9812 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
9813                               void *matcher, void *key,
9814                               const struct rte_flow_item *item)
9815 {
9816         uint32_t reg_value = 0;
9817         int reg_id;
9818         /* 8 LSB: 0b11000011, the middle 4 bits are reserved. */
9819         uint32_t reg_mask = 0;
9820         const struct rte_flow_item_conntrack *spec = item->spec;
9821         const struct rte_flow_item_conntrack *mask = item->mask;
9822         uint32_t flags;
9823         struct rte_flow_error error;
9824
9825         if (!mask)
9826                 mask = &rte_flow_item_conntrack_mask;
9827         if (!spec || !mask->flags)
9828                 return;
9829         flags = spec->flags & mask->flags;
9830         /* Conflicts should have been checked during validation. */
9831         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
9832                 reg_value |= MLX5_CT_SYNDROME_VALID;
9833         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9834                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
9835         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
9836                 reg_value |= MLX5_CT_SYNDROME_INVALID;
9837         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
9838                 reg_value |= MLX5_CT_SYNDROME_TRAP;
9839         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9840                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
9841         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
9842                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
9843                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
9844                 reg_mask |= 0xc0;
9845         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9846                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
9847         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9848                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
9849         /* The REG_C_x value could be saved during startup. */
9850         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
9851         if (reg_id == REG_NON)
9852                 return;
9853         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
9854                                reg_value, reg_mask);
9855 }
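/*
 * Illustrative item for the translation above. A sketch: matching
 * packets that connection tracking classified as valid:
 *
 *	struct rte_flow_item_conntrack spec = {
 *		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
 *		.spec = &spec,
 *	};
 *
 * The flags are translated into the CT syndrome bits of a REG_C_x
 * register as shown above.
 */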
9856
9857 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9858
9859 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9860         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9861                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
9862
9863 /**
9864  * Calculate flow matcher enable bitmap.
9865  *
9866  * @param match_criteria
9867  *   Pointer to flow matcher criteria.
9868  *
9869  * @return
9870  *   Bitmap of enabled fields.
9871  */
9872 static uint8_t
9873 flow_dv_matcher_enable(uint32_t *match_criteria)
9874 {
9875         uint8_t match_criteria_enable;
9876
9877         match_criteria_enable =
9878                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9879                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9880         match_criteria_enable |=
9881                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9882                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9883         match_criteria_enable |=
9884                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9885                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9886         match_criteria_enable |=
9887                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9888                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9889         match_criteria_enable |=
9890                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9891                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9892         match_criteria_enable |=
9893                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9894                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9895         return match_criteria_enable;
9896 }
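/*
 * For example (a sketch): a matcher whose mask touches only the outer
 * L2-L4 headers and misc_parameters_3 (say, outer IPv4 plus ICMP) would
 * yield (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 * (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT).
 */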
9897
9898 struct mlx5_hlist_entry *
9899 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
9900 {
9901         struct mlx5_dev_ctx_shared *sh = list->ctx;
9902         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9903         struct rte_eth_dev *dev = ctx->dev;
9904         struct mlx5_flow_tbl_data_entry *tbl_data;
9905         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
9906         struct rte_flow_error *error = ctx->error;
9907         union mlx5_flow_tbl_key key = { .v64 = key64 };
9908         struct mlx5_flow_tbl_resource *tbl;
9909         void *domain;
9910         uint32_t idx = 0;
9911         int ret;
9912
9913         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
9914         if (!tbl_data) {
9915                 rte_flow_error_set(error, ENOMEM,
9916                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9917                                    NULL,
9918                                    "cannot allocate flow table data entry");
9919                 return NULL;
9920         }
9921         tbl_data->idx = idx;
9922         tbl_data->tunnel = tt_prm->tunnel;
9923         tbl_data->group_id = tt_prm->group_id;
9924         tbl_data->external = !!tt_prm->external;
9925         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
9926         tbl_data->is_egress = !!key.is_egress;
9927         tbl_data->is_transfer = !!key.is_fdb;
9928         tbl_data->dummy = !!key.dummy;
9929         tbl_data->level = key.level;
9930         tbl_data->id = key.id;
9931         tbl = &tbl_data->tbl;
9932         if (key.dummy)
9933                 return &tbl_data->entry;
9934         if (key.is_fdb)
9935                 domain = sh->fdb_domain;
9936         else if (key.is_egress)
9937                 domain = sh->tx_domain;
9938         else
9939                 domain = sh->rx_domain;
9940         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
9941         if (ret) {
9942                 rte_flow_error_set(error, ENOMEM,
9943                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9944                                    NULL, "cannot create flow table object");
9945                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9946                 return NULL;
9947         }
9948         if (key.level != 0) {
9949                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9950                                         (tbl->obj, &tbl_data->jump.action);
9951                 if (ret) {
9952                         rte_flow_error_set(error, ENOMEM,
9953                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9954                                            NULL,
9955                                            "cannot create flow jump action");
9956                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
9957                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9958                         return NULL;
9959                 }
9960         }
9961         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
9962               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
9963               key.level, key.id);
9964         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
9965                              flow_dv_matcher_create_cb,
9966                              flow_dv_matcher_match_cb,
9967                              flow_dv_matcher_remove_cb);
9968         return &tbl_data->entry;
9969 }
9970
9971 int
9972 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9973                      struct mlx5_hlist_entry *entry, uint64_t key64,
9974                      void *cb_ctx __rte_unused)
9975 {
9976         struct mlx5_flow_tbl_data_entry *tbl_data =
9977                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9978         union mlx5_flow_tbl_key key = { .v64 = key64 };
9979
9980         return tbl_data->level != key.level ||
9981                tbl_data->id != key.id ||
9982                tbl_data->dummy != key.dummy ||
9983                tbl_data->is_transfer != !!key.is_fdb ||
9984                tbl_data->is_egress != !!key.is_egress;
9985 }
9986
9987 /**
9988  * Get a flow table.
9989  *
9990  * @param[in, out] dev
9991  *   Pointer to rte_eth_dev structure.
9992  * @param[in] table_level
9993  *   Table level to use.
9994  * @param[in] egress
9995  *   Direction of the table.
9996  * @param[in] transfer
9997  *   E-Switch or NIC flow.
9998  * @param[in] dummy
9999  *   Dummy entry for dv API.
10000  * @param[in] table_id
10001  *   Table id to use.
10002  * @param[out] error
10003  *   pointer to error structure.
10004  *
10005  * @return
10006  *   Returns the table resource based on the index, NULL in case of failure.
10007  */
10008 struct mlx5_flow_tbl_resource *
10009 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10010                          uint32_t table_level, uint8_t egress,
10011                          uint8_t transfer,
10012                          bool external,
10013                          const struct mlx5_flow_tunnel *tunnel,
10014                          uint32_t group_id, uint8_t dummy,
10015                          uint32_t table_id,
10016                          struct rte_flow_error *error)
10017 {
10018         struct mlx5_priv *priv = dev->data->dev_private;
10019         union mlx5_flow_tbl_key table_key = {
10020                 {
10021                         .level = table_level,
10022                         .id = table_id,
10023                         .reserved = 0,
10024                         .dummy = !!dummy,
10025                         .is_fdb = !!transfer,
10026                         .is_egress = !!egress,
10027                 }
10028         };
10029         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10030                 .tunnel = tunnel,
10031                 .group_id = group_id,
10032                 .external = external,
10033         };
10034         struct mlx5_flow_cb_ctx ctx = {
10035                 .dev = dev,
10036                 .error = error,
10037                 .data = &tt_prm,
10038         };
10039         struct mlx5_hlist_entry *entry;
10040         struct mlx5_flow_tbl_data_entry *tbl_data;
10041
10042         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10043         if (!entry) {
10044                 rte_flow_error_set(error, ENOMEM,
10045                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10046                                    "cannot get table");
10047                 return NULL;
10048         }
10049         DRV_LOG(DEBUG, "table_level %u table_id %u "
10050                 "tunnel %u group %u registered.",
10051                 table_level, table_id,
10052                 tunnel ? tunnel->tunnel_id : 0, group_id);
10053         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10054         return &tbl_data->tbl;
10055 }
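/*
 * Illustrative call (a sketch): getting the NIC Rx table for level 1,
 * group 1, without tunnel context:
 *
 *	tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
 *				       1, 0, 0, &error);
 *
 * Repeated calls with the same key return the same hash-list entry with
 * its reference count incremented; flow_dv_tbl_resource_release() drops
 * the reference.
 */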
10056
10057 void
10058 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
10059                       struct mlx5_hlist_entry *entry)
10060 {
10061         struct mlx5_dev_ctx_shared *sh = list->ctx;
10062         struct mlx5_flow_tbl_data_entry *tbl_data =
10063                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10064
10065         MLX5_ASSERT(entry && sh);
10066         if (tbl_data->jump.action)
10067                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10068         if (tbl_data->tbl.obj)
10069                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10070         if (tbl_data->tunnel_offload && tbl_data->external) {
10071                 struct mlx5_hlist_entry *he;
10072                 struct mlx5_hlist *tunnel_grp_hash;
10073                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10074                 union tunnel_tbl_key tunnel_key = {
10075                         .tunnel_id = tbl_data->tunnel ?
10076                                         tbl_data->tunnel->tunnel_id : 0,
10077                         .group = tbl_data->group_id
10078                 };
10079                 uint32_t table_level = tbl_data->level;
10080
10081                 tunnel_grp_hash = tbl_data->tunnel ?
10082                                         tbl_data->tunnel->groups :
10083                                         thub->groups;
10084                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
10085                 if (he)
10086                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10087                 DRV_LOG(DEBUG,
10088                         "table_level %u id %u tunnel %u group %u released.",
10089                         table_level,
10090                         tbl_data->id,
10091                         tbl_data->tunnel ?
10092                         tbl_data->tunnel->tunnel_id : 0,
10093                         tbl_data->group_id);
10094         }
10095         mlx5_cache_list_destroy(&tbl_data->matchers);
10096         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10097 }
10098
10099 /**
10100  * Release a flow table.
10101  *
10102  * @param[in] sh
10103  *   Pointer to device shared structure.
10104  * @param[in] tbl
10105  *   Table resource to be released.
10106  *
10107  * @return
10108  *   Returns 0 if the table was released, 1 otherwise.
10109  */
10110 static int
10111 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10112                              struct mlx5_flow_tbl_resource *tbl)
10113 {
10114         struct mlx5_flow_tbl_data_entry *tbl_data =
10115                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10116
10117         if (!tbl)
10118                 return 0;
10119         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10120 }
10121
10122 int
10123 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
10124                          struct mlx5_cache_entry *entry, void *cb_ctx)
10125 {
10126         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10127         struct mlx5_flow_dv_matcher *ref = ctx->data;
10128         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10129                                                         entry);
10130
10131         return cur->crc != ref->crc ||
10132                cur->priority != ref->priority ||
10133                memcmp((const void *)cur->mask.buf,
10134                       (const void *)ref->mask.buf, ref->mask.size);
10135 }
10136
10137 struct mlx5_cache_entry *
10138 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
10139                           struct mlx5_cache_entry *entry __rte_unused,
10140                           void *cb_ctx)
10141 {
10142         struct mlx5_dev_ctx_shared *sh = list->ctx;
10143         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10144         struct mlx5_flow_dv_matcher *ref = ctx->data;
10145         struct mlx5_flow_dv_matcher *cache;
10146         struct mlx5dv_flow_matcher_attr dv_attr = {
10147                 .type = IBV_FLOW_ATTR_NORMAL,
10148                 .match_mask = (void *)&ref->mask,
10149         };
10150         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10151                                                             typeof(*tbl), tbl);
10152         int ret;
10153
10154         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
10155         if (!cache) {
10156                 rte_flow_error_set(ctx->error, ENOMEM,
10157                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10158                                    "cannot create matcher");
10159                 return NULL;
10160         }
10161         *cache = *ref;
10162         dv_attr.match_criteria_enable =
10163                 flow_dv_matcher_enable(cache->mask.buf);
10164         dv_attr.priority = ref->priority;
10165         if (tbl->is_egress)
10166                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10167         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
10168                                                &cache->matcher_object);
10169         if (ret) {
10170                 mlx5_free(cache);
10171                 rte_flow_error_set(ctx->error, ENOMEM,
10172                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10173                                    "cannot create matcher");
10174                 return NULL;
10175         }
10176         return &cache->entry;
10177 }
10178
10179 /**
10180  * Register the flow matcher.
10181  *
10182  * @param[in, out] dev
10183  *   Pointer to rte_eth_dev structure.
10184  * @param[in, out] matcher
10185  *   Pointer to flow matcher.
10186  * @param[in, out] key
10187  *   Pointer to flow table key.
10188  * @parm[in, out] dev_flow
10189  *   Pointer to the dev_flow.
10190  * @param[out] error
10191  *   pointer to error structure.
10192  *
10193  * @return
10194  *   0 on success, a negative errno value otherwise and rte_errno is set.
10195  */
10196 static int
10197 flow_dv_matcher_register(struct rte_eth_dev *dev,
10198                          struct mlx5_flow_dv_matcher *ref,
10199                          union mlx5_flow_tbl_key *key,
10200                          struct mlx5_flow *dev_flow,
10201                          const struct mlx5_flow_tunnel *tunnel,
10202                          uint32_t group_id,
10203                          struct rte_flow_error *error)
10204 {
10205         struct mlx5_cache_entry *entry;
10206         struct mlx5_flow_dv_matcher *cache;
10207         struct mlx5_flow_tbl_resource *tbl;
10208         struct mlx5_flow_tbl_data_entry *tbl_data;
10209         struct mlx5_flow_cb_ctx ctx = {
10210                 .error = error,
10211                 .data = ref,
10212         };
10213
10214         /*
10215          * The tunnel offload API requires this registration for the cases
10216          * when a tunnel match rule was inserted before a tunnel set rule.
10217          */
10218         tbl = flow_dv_tbl_resource_get(dev, key->level,
10219                                        key->is_egress, key->is_fdb,
10220                                        dev_flow->external, tunnel,
10221                                        group_id, 0, key->id, error);
10222         if (!tbl)
10223                 return -rte_errno;      /* No need to refill the error info */
10224         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10225         ref->tbl = tbl;
10226         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
10227         if (!entry) {
10228                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10229                 return rte_flow_error_set(error, ENOMEM,
10230                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10231                                           "cannot allocate ref memory");
10232         }
10233         cache = container_of(entry, typeof(*cache), entry);
10234         dev_flow->handle->dvh.matcher = cache;
10235         return 0;
10236 }
10237
10238 struct mlx5_hlist_entry *
10239 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
10240 {
10241         struct mlx5_dev_ctx_shared *sh = list->ctx;
10242         struct rte_flow_error *error = ctx;
10243         struct mlx5_flow_dv_tag_resource *entry;
10244         uint32_t idx = 0;
10245         int ret;
10246
10247         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10248         if (!entry) {
10249                 rte_flow_error_set(error, ENOMEM,
10250                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10251                                    "cannot allocate resource memory");
10252                 return NULL;
10253         }
10254         entry->idx = idx;
10255         entry->tag_id = key;
10256         ret = mlx5_flow_os_create_flow_action_tag(key,
10257                                                   &entry->action);
10258         if (ret) {
10259                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10260                 rte_flow_error_set(error, ENOMEM,
10261                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10262                                    NULL, "cannot create action");
10263                 return NULL;
10264         }
10265         return &entry->entry;
10266 }
10267
10268 int
10269 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
10270                      struct mlx5_hlist_entry *entry, uint64_t key,
10271                      void *cb_ctx __rte_unused)
10272 {
10273         struct mlx5_flow_dv_tag_resource *tag =
10274                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10275
10276         return key != tag->tag_id;
10277 }
10278
10279 /**
10280  * Find existing tag resource or create and register a new one.
10281  *
10282  * @param[in, out] dev
10283  *   Pointer to rte_eth_dev structure.
10284  * @param[in] tag_be24
10285  *   Tag value in big endian, right-shifted by 8 bits.
10286  * @parm[in, out] dev_flow
10287  *   Pointer to the dev_flow.
10288  * @param[out] error
10289  *   pointer to error structure.
10290  *
10291  * @return
10292  *   0 on success, a negative errno value otherwise and rte_errno is set.
10293  */
10294 static int
10295 flow_dv_tag_resource_register
10296                         (struct rte_eth_dev *dev,
10297                          uint32_t tag_be24,
10298                          struct mlx5_flow *dev_flow,
10299                          struct rte_flow_error *error)
10300 {
10301         struct mlx5_priv *priv = dev->data->dev_private;
10302         struct mlx5_flow_dv_tag_resource *cache_resource;
10303         struct mlx5_hlist_entry *entry;
10304
10305         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
10306         if (entry) {
10307                 cache_resource = container_of
10308                         (entry, struct mlx5_flow_dv_tag_resource, entry);
10309                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
10310                 dev_flow->dv.tag_resource = cache_resource;
10311                 return 0;
10312         }
10313         return -rte_errno;
10314 }
10315
10316 void
10317 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
10318                       struct mlx5_hlist_entry *entry)
10319 {
10320         struct mlx5_dev_ctx_shared *sh = list->ctx;
10321         struct mlx5_flow_dv_tag_resource *tag =
10322                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10323
10324         MLX5_ASSERT(tag && sh && tag->action);
10325         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10326         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10327         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10328 }
10329
10330 /**
10331  * Release the tag.
10332  *
10333  * @param dev
10334  *   Pointer to Ethernet device.
10335  * @param tag_idx
10336  *   Tag index.
10337  *
10338  * @return
10339  *   1 while a reference on it exists, 0 when freed.
10340  */
10341 static int
10342 flow_dv_tag_release(struct rte_eth_dev *dev,
10343                     uint32_t tag_idx)
10344 {
10345         struct mlx5_priv *priv = dev->data->dev_private;
10346         struct mlx5_flow_dv_tag_resource *tag;
10347
10348         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10349         if (!tag)
10350                 return 0;
10351         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10352                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10353         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10354 }
10355
10356 /**
10357  * Translate port ID action to vport.
10358  *
10359  * @param[in] dev
10360  *   Pointer to rte_eth_dev structure.
10361  * @param[in] action
10362  *   Pointer to the port ID action.
10363  * @param[out] dst_port_id
10364  *   The target port ID.
10365  * @param[out] error
10366  *   Pointer to the error structure.
10367  *
10368  * @return
10369  *   0 on success, a negative errno value otherwise and rte_errno is set.
10370  */
10371 static int
10372 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10373                                  const struct rte_flow_action *action,
10374                                  uint32_t *dst_port_id,
10375                                  struct rte_flow_error *error)
10376 {
10377         uint32_t port;
10378         struct mlx5_priv *priv;
10379         const struct rte_flow_action_port_id *conf =
10380                         (const struct rte_flow_action_port_id *)action->conf;
10381
10382         port = conf->original ? dev->data->port_id : conf->id;
10383         priv = mlx5_port_to_eswitch_info(port, false);
10384         if (!priv)
10385                 return rte_flow_error_set(error, -rte_errno,
10386                                           RTE_FLOW_ERROR_TYPE_ACTION,
10387                                           NULL,
10388                                           "No eswitch info was found for port");
10389 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
10390         /*
10391          * This parameter is transferred to
10392          * mlx5dv_dr_action_create_dest_ib_port().
10393          */
10394         *dst_port_id = priv->dev_port;
10395 #else
10396         /*
10397          * Legacy mode, no LAG configuration is supported.
10398          * This parameter is transferred to
10399          * mlx5dv_dr_action_create_dest_vport().
10400          */
10401         *dst_port_id = priv->vport_id;
10402 #endif
10403         return 0;
10404 }
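/*
 * Illustrative action consumed by the translation above. A sketch; the
 * destination port ID is hypothetical:
 *
 *	struct rte_flow_action_port_id conf = { .id = 1, .original = 0 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *		.conf = &conf,
 *	};
 *
 * Setting .original = 1 would select the flow-creating port itself
 * instead of .id, as the translation above shows.
 */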
10405
10406 /**
10407  * Create a counter with aging configuration.
10408  *
10409  * @param[in] dev
10410  *   Pointer to rte_eth_dev structure.
10411  * @param[in] dev_flow
10412  *   Pointer to the mlx5_flow.
10413  * @param[out] count
10414  *   Pointer to the counter action configuration.
10415  * @param[in] age
10416  *   Pointer to the aging action configuration.
10417  *
10418  * @return
10419  *   Index to flow counter on success, 0 otherwise.
10420  */
10421 static uint32_t
10422 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10423                                 struct mlx5_flow *dev_flow,
10424                                 const struct rte_flow_action_count *count,
10425                                 const struct rte_flow_action_age *age)
10426 {
10427         uint32_t counter;
10428         struct mlx5_age_param *age_param;
10429
10430         if (count && count->shared)
10431                 counter = flow_dv_counter_get_shared(dev, count->id);
10432         else
10433                 counter = flow_dv_counter_alloc(dev, !!age);
10434         if (!counter || age == NULL)
10435                 return counter;
10436         age_param = flow_dv_counter_idx_get_age(dev, counter);
10437         age_param->context = age->context ? age->context :
10438                 (void *)(uintptr_t)(dev_flow->flow_idx);
10439         age_param->timeout = age->timeout;
10440         age_param->port_id = dev->data->port_id;
10441         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10442         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10443         return counter;
10444 }
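/*
 * Illustrative action pair consumed by the helper above. A sketch with
 * hypothetical values: a non-shared counter combined with a 10-second
 * aging timeout (a fate action such as QUEUE would follow in a real
 * rule):
 *
 *	struct rte_flow_action_count count = { .shared = 0, .id = 0 };
 *	struct rte_flow_action_age age = { .timeout = 10, .context = NULL };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */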
10445
10446 /**
10447  * Add Tx queue matcher.
10448  *
10449  * @param[in] dev
10450  *   Pointer to the dev struct.
10451  * @param[in, out] matcher
10452  *   Flow matcher.
10453  * @param[in, out] key
10454  *   Flow matcher value.
10455  * @param[in] item
10456  *   Flow pattern to translate.
10459  */
10460 static void
10461 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10462                                 void *matcher, void *key,
10463                                 const struct rte_flow_item *item)
10464 {
10465         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10466         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10467         void *misc_m =
10468                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10469         void *misc_v =
10470                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10471         struct mlx5_txq_ctrl *txq;
10472         uint32_t queue;
10473
10475         queue_m = (const void *)item->mask;
10476         if (!queue_m)
10477                 return;
10478         queue_v = (const void *)item->spec;
10479         if (!queue_v)
10480                 return;
10481         txq = mlx5_txq_get(dev, queue_v->queue);
10482         if (!txq)
10483                 return;
10484         queue = txq->obj->sq->id;
10485         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10486         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10487                  queue & queue_m->queue);
10488         mlx5_txq_release(dev, queue_v->queue);
10489 }
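/*
 * Illustrative use (a sketch): this PMD-internal item is built by the
 * driver itself, e.g. for hairpin Tx flows, and is not part of the
 * public rte_flow item set. A mask is mandatory for the translation
 * above:
 *
 *	struct mlx5_rte_flow_item_tx_queue spec = { .queue = 0 };
 *	struct mlx5_rte_flow_item_tx_queue mask = { .queue = UINT32_MAX };
 *	struct rte_flow_item item = {
 *		.type = (enum rte_flow_item_type)
 *			MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 */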
10490
10491 /**
10492  * Set the hash fields according to the @p flow information.
10493  *
10494  * @param[in] dev_flow
10495  *   Pointer to the mlx5_flow.
10496  * @param[in] rss_desc
10497  *   Pointer to the mlx5_flow_rss_desc.
10498  */
10499 static void
10500 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10501                        struct mlx5_flow_rss_desc *rss_desc)
10502 {
10503         uint64_t items = dev_flow->handle->layers;
10504         int rss_inner = 0;
10505         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10506
10507         dev_flow->hash_fields = 0;
10508 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10509         if (rss_desc->level >= 2) {
10510                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10511                 rss_inner = 1;
10512         }
10513 #endif
10514         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10515             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10516                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10517                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10518                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10519                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10520                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10521                         else
10522                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10523                 }
10524         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10525                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10526                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10527                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10528                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10529                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10530                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10531                         else
10532                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10533                 }
10534         }
10535         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10536             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10537                 if (rss_types & ETH_RSS_UDP) {
10538                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10539                                 dev_flow->hash_fields |=
10540                                                 IBV_RX_HASH_SRC_PORT_UDP;
10541                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10542                                 dev_flow->hash_fields |=
10543                                                 IBV_RX_HASH_DST_PORT_UDP;
10544                         else
10545                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10546                 }
10547         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10548                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10549                 if (rss_types & ETH_RSS_TCP) {
10550                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10551                                 dev_flow->hash_fields |=
10552                                                 IBV_RX_HASH_SRC_PORT_TCP;
10553                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10554                                 dev_flow->hash_fields |=
10555                                                 IBV_RX_HASH_DST_PORT_TCP;
10556                         else
10557                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10558                 }
10559         }
10560 }
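/*
 * Illustrative RSS action feeding the logic above. A sketch: hashing on
 * the inner IPv4 source address only ("queues" is a hypothetical queue
 * array; level 2 selects the inner headers):
 *
 *	struct rte_flow_action_rss rss = {
 *		.level = 2,
 *		.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY,
 *		.queue_num = 4,
 *		.queue = queues,
 *	};
 *
 * With an inner IPv4 layer present, the code above then sets
 * dev_flow->hash_fields to IBV_RX_HASH_INNER | IBV_RX_HASH_SRC_IPV4.
 */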
10561
10562 /**
10563  * Prepare an Rx Hash queue.
10564  *
10565  * @param dev
10566  *   Pointer to Ethernet device.
10567  * @param[in] dev_flow
10568  *   Pointer to the mlx5_flow.
10569  * @param[in] rss_desc
10570  *   Pointer to the mlx5_flow_rss_desc.
10571  * @param[out] hrxq_idx
10572  *   Hash Rx queue index.
10573  *
10574  * @return
10575  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10576  */
10577 static struct mlx5_hrxq *
10578 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10579                      struct mlx5_flow *dev_flow,
10580                      struct mlx5_flow_rss_desc *rss_desc,
10581                      uint32_t *hrxq_idx)
10582 {
10583         struct mlx5_priv *priv = dev->data->dev_private;
10584         struct mlx5_flow_handle *dh = dev_flow->handle;
10585         struct mlx5_hrxq *hrxq;
10586
10587         MLX5_ASSERT(rss_desc->queue_num);
10588         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10589         rss_desc->hash_fields = dev_flow->hash_fields;
10590         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10591         rss_desc->shared_rss = 0;
10592         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10593         if (!*hrxq_idx)
10594                 return NULL;
10595         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10596                               *hrxq_idx);
10597         return hrxq;
10598 }
10599
10600 /**
10601  * Release sample sub action resource.
10602  *
10603  * @param[in, out] dev
10604  *   Pointer to rte_eth_dev structure.
10605  * @param[in] act_res
10606  *   Pointer to sample sub action resource.
10607  */
10608 static void
10609 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10610                                    struct mlx5_flow_sub_actions_idx *act_res)
10611 {
10612         if (act_res->rix_hrxq) {
10613                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10614                 act_res->rix_hrxq = 0;
10615         }
10616         if (act_res->rix_encap_decap) {
10617                 flow_dv_encap_decap_resource_release(dev,
10618                                                      act_res->rix_encap_decap);
10619                 act_res->rix_encap_decap = 0;
10620         }
10621         if (act_res->rix_port_id_action) {
10622                 flow_dv_port_id_action_resource_release(dev,
10623                                                 act_res->rix_port_id_action);
10624                 act_res->rix_port_id_action = 0;
10625         }
10626         if (act_res->rix_tag) {
10627                 flow_dv_tag_release(dev, act_res->rix_tag);
10628                 act_res->rix_tag = 0;
10629         }
10630         if (act_res->rix_jump) {
10631                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10632                 act_res->rix_jump = 0;
10633         }
10634 }
10635
10636 int
10637 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
10638                         struct mlx5_cache_entry *entry, void *cb_ctx)
10639 {
10640         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10641         struct rte_eth_dev *dev = ctx->dev;
10642         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10643         struct mlx5_flow_dv_sample_resource *cache_resource =
10644                         container_of(entry, typeof(*cache_resource), entry);
10645
10646         if (resource->ratio == cache_resource->ratio &&
10647             resource->ft_type == cache_resource->ft_type &&
10648             resource->ft_id == cache_resource->ft_id &&
10649             resource->set_action == cache_resource->set_action &&
10650             !memcmp((void *)&resource->sample_act,
10651                     (void *)&cache_resource->sample_act,
10652                     sizeof(struct mlx5_flow_sub_actions_list))) {
10653                 /*
10654                  * Existing sample action should release the prepared
10655                  * sub-actions reference counter.
10656                  */
10657                 flow_dv_sample_sub_actions_release(dev,
10658                                                 &resource->sample_idx);
10659                 return 0;
10660         }
10661         return 1;
10662 }
10663
10664 struct mlx5_cache_entry *
10665 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
10666                          struct mlx5_cache_entry *entry __rte_unused,
10667                          void *cb_ctx)
10668 {
10669         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10670         struct rte_eth_dev *dev = ctx->dev;
10671         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10672         void **sample_dv_actions = resource->sub_actions;
10673         struct mlx5_flow_dv_sample_resource *cache_resource;
10674         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
10675         struct mlx5_priv *priv = dev->data->dev_private;
10676         struct mlx5_dev_ctx_shared *sh = priv->sh;
10677         struct mlx5_flow_tbl_resource *tbl;
10678         uint32_t idx = 0;
10679         const uint32_t next_ft_step = 1;
10680         uint32_t next_ft_id = resource->ft_id + next_ft_step;
10681         uint8_t is_egress = 0;
10682         uint8_t is_transfer = 0;
10683         struct rte_flow_error *error = ctx->error;
10684
10685         /* Register new sample resource. */
10686         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10687         if (!cache_resource) {
10688                 rte_flow_error_set(error, ENOMEM,
10689                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10690                                           NULL,
10691                                           "cannot allocate resource memory");
10692                 return NULL;
10693         }
10694         *cache_resource = *resource;
10695         /* Create normal path table level */
10696         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10697                 is_transfer = 1;
10698         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
10699                 is_egress = 1;
10700         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
10701                                         is_egress, is_transfer,
10702                                         true, NULL, 0, 0, 0, error);
10703         if (!tbl) {
10704                 rte_flow_error_set(error, ENOMEM,
10705                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10706                                           NULL,
10707                                           "fail to create normal path table "
10708                                           "for sample");
10709                 goto error;
10710         }
10711         cache_resource->normal_path_tbl = tbl;
10712         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10713                 if (!sh->default_miss_action) {
10714                         rte_flow_error_set(error, ENOMEM,
10715                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10716                                                 NULL,
10717                                                 "default miss action was not "
10718                                                 "created");
10719                         goto error;
10720                 }
10721                 sample_dv_actions[resource->sample_act.actions_num++] =
10722                                                 sh->default_miss_action;
10723         }
10724         /* Create a DR sample action */
10725         sampler_attr.sample_ratio = cache_resource->ratio;
10726         sampler_attr.default_next_table = tbl->obj;
10727         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
10728         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
10729                                                         &sample_dv_actions[0];
10730         sampler_attr.action = cache_resource->set_action;
10731         if (mlx5_os_flow_dr_create_flow_action_sampler
10732                         (&sampler_attr, &cache_resource->verbs_action)) {
10733                 rte_flow_error_set(error, ENOMEM,
10734                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10735                                         NULL, "cannot create sample action");
10736                 goto error;
10737         }
10738         cache_resource->idx = idx;
10739         cache_resource->dev = dev;
10740         return &cache_resource->entry;
10741 error:
10742         if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
10743                 flow_dv_sample_sub_actions_release(dev,
10744                                                    &cache_resource->sample_idx);
10745         if (cache_resource->normal_path_tbl)
10746                 flow_dv_tbl_resource_release(MLX5_SH(dev),
10747                                 cache_resource->normal_path_tbl);
10748         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10749         return NULL;
10751 }
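
/*
 * Illustrative sketch only: how the mlx5dv_dr_flow_sampler_attr fields
 * filled above map to a concrete 1-in-1000 sampler.  The local names
 * (miss_tbl, actions, n_actions, set_action) are hypothetical; the real
 * values come from the cached resource built by this callback.
 *
 *     struct mlx5dv_dr_flow_sampler_attr attr = {
 *             .sample_ratio = 1000,            // sample 1 of every 1000
 *             .default_next_table = miss_tbl,  // normal path table object
 *             .num_sample_actions = n_actions, // actions on the sampled copy
 *             .sample_actions = actions,       // struct mlx5dv_dr_action **
 *             .action = set_action,            // metadata set value (FDB), or 0
 *     };
 *     // mlx5_os_flow_dr_create_flow_action_sampler(&attr, &dr_action);
 */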
10752
10753 /**
10754  * Find existing sample resource or create and register a new one.
10755  *
10756  * @param[in, out] dev
10757  *   Pointer to rte_eth_dev structure.
10758  * @param[in] resource
10759  *   Pointer to sample resource.
10760  * @param[in, out] dev_flow
10761  *   Pointer to the dev_flow.
10762  * @param[out] error
10763  *   Pointer to the error structure.
10764  *
10765  * @return
10766  *   0 on success, otherwise -errno and rte_errno is set.
10767  */
10768 static int
10769 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10770                          struct mlx5_flow_dv_sample_resource *resource,
10771                          struct mlx5_flow *dev_flow,
10772                          struct rte_flow_error *error)
10773 {
10774         struct mlx5_flow_dv_sample_resource *cache_resource;
10775         struct mlx5_cache_entry *entry;
10776         struct mlx5_priv *priv = dev->data->dev_private;
10777         struct mlx5_flow_cb_ctx ctx = {
10778                 .dev = dev,
10779                 .error = error,
10780                 .data = resource,
10781         };
10782
10783         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10784         if (!entry)
10785                 return -rte_errno;
10786         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10787         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10788         dev_flow->dv.sample_res = cache_resource;
10789         return 0;
10790 }
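
/*
 * The registration above follows the generic mlx5_cache_list pattern:
 * the lookup key travels through mlx5_flow_cb_ctx.data, the match
 * callback returns 0 on "equal" (the prepared sub-actions are released
 * and the existing entry is reused) and non-zero on "different", and
 * the create callback runs only on a miss.  A minimal caller sketch,
 * with hypothetical field values:
 *
 *     struct mlx5_flow_dv_sample_resource res = {
 *             .ratio = 1000,
 *             .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
 *     };
 *     // On success dev_flow->dv.sample_res points at the (possibly
 *     // pre-existing, reference-counted) cached entry.
 *     if (flow_dv_sample_resource_register(dev, &res, dev_flow, error))
 *             return -rte_errno;
 */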
10791
10792 int
10793 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10794                             struct mlx5_cache_entry *entry, void *cb_ctx)
10795 {
10796         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10797         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10798         struct rte_eth_dev *dev = ctx->dev;
10799         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10800                         container_of(entry, typeof(*cache_resource), entry);
10801         uint32_t idx = 0;
10802
10803         if (resource->num_of_dest == cache_resource->num_of_dest &&
10804             resource->ft_type == cache_resource->ft_type &&
10805             !memcmp((void *)cache_resource->sample_act,
10806                     (void *)resource->sample_act,
10807                    (resource->num_of_dest *
10808                    sizeof(struct mlx5_flow_sub_actions_list)))) {
10809                 /*
10810                  * On a match with an existing sample action, release the
10811                  * references prepared for the sub-actions.
10812                  */
10813                 for (idx = 0; idx < resource->num_of_dest; idx++)
10814                         flow_dv_sample_sub_actions_release(dev,
10815                                         &resource->sample_idx[idx]);
10816                 return 0;
10817         }
10818         return 1;
10819 }
10820
10821 struct mlx5_cache_entry *
10822 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
10823                          struct mlx5_cache_entry *entry __rte_unused,
10824                          void *cb_ctx)
10825 {
10826         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10827         struct rte_eth_dev *dev = ctx->dev;
10828         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10829         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10830         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
10831         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
10832         struct mlx5_priv *priv = dev->data->dev_private;
10833         struct mlx5_dev_ctx_shared *sh = priv->sh;
10834         struct mlx5_flow_sub_actions_list *sample_act;
10835         struct mlx5dv_dr_domain *domain;
10836         uint32_t idx = 0, res_idx = 0;
10837         struct rte_flow_error *error = ctx->error;
10838         uint64_t action_flags;
10839         int ret;
10840
10841         /* Register new destination array resource. */
10842         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10843                                             &res_idx);
10844         if (!cache_resource) {
10845                 rte_flow_error_set(error, ENOMEM,
10846                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10847                                           NULL,
10848                                           "cannot allocate resource memory");
10849                 return NULL;
10850         }
10851         *cache_resource = *resource;
10852         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10853                 domain = sh->fdb_domain;
10854         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
10855                 domain = sh->rx_domain;
10856         else
10857                 domain = sh->tx_domain;
10858         for (idx = 0; idx < resource->num_of_dest; idx++) {
10859                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
10860                                  mlx5_malloc(MLX5_MEM_ZERO,
10861                                  sizeof(struct mlx5dv_dr_action_dest_attr),
10862                                  0, SOCKET_ID_ANY);
10863                 if (!dest_attr[idx]) {
10864                         rte_flow_error_set(error, ENOMEM,
10865                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10866                                            NULL,
10867                                            "cannot allocate resource memory");
10868                         goto error;
10869                 }
10870                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
10871                 sample_act = &resource->sample_act[idx];
10872                 action_flags = sample_act->action_flags;
10873                 switch (action_flags) {
10874                 case MLX5_FLOW_ACTION_QUEUE:
10875                         dest_attr[idx]->dest = sample_act->dr_queue_action;
10876                         break;
10877                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
10878                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
10879                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
10880                         dest_attr[idx]->dest_reformat->reformat =
10881                                         sample_act->dr_encap_action;
10882                         dest_attr[idx]->dest_reformat->dest =
10883                                         sample_act->dr_port_id_action;
10884                         break;
10885                 case MLX5_FLOW_ACTION_PORT_ID:
10886                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
10887                         break;
10888                 case MLX5_FLOW_ACTION_JUMP:
10889                         dest_attr[idx]->dest = sample_act->dr_jump_action;
10890                         break;
10891                 default:
10892                         rte_flow_error_set(error, EINVAL,
10893                                            RTE_FLOW_ERROR_TYPE_ACTION,
10894                                            NULL,
10895                                            "unsupported actions type");
10896                         goto error;
10897                 }
10898         }
10899         /* Create a dest array action. */
10900         ret = mlx5_os_flow_dr_create_flow_action_dest_array
10901                                                 (domain,
10902                                                  cache_resource->num_of_dest,
10903                                                  dest_attr,
10904                                                  &cache_resource->action);
10905         if (ret) {
10906                 rte_flow_error_set(error, ENOMEM,
10907                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10908                                    NULL,
10909                                    "cannot create destination array action");
10910                 goto error;
10911         }
10912         cache_resource->idx = res_idx;
10913         cache_resource->dev = dev;
10914         for (idx = 0; idx < resource->num_of_dest; idx++)
10915                 mlx5_free(dest_attr[idx]);
10916         return &cache_resource->entry;
10917 error:
10918         for (idx = 0; idx < resource->num_of_dest; idx++) {
10919                 flow_dv_sample_sub_actions_release(dev,
10920                                 &cache_resource->sample_idx[idx]);
10921                 if (dest_attr[idx])
10922                         mlx5_free(dest_attr[idx]);
10923         }
10924
10925         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
10926         return NULL;
10927 }
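
/*
 * Sketch of the destination encoding built in the loop above, for a
 * two-destination mirror where the second copy is re-encapsulated and
 * sent to a vport.  Field names are the mlx5dv_dr ones used above; the
 * *_action pointers are hypothetical.
 *
 *     struct mlx5dv_dr_action_dest_reformat ref = {
 *             .reformat = encap_action,   // MLX5_FLOW_ACTION_ENCAP part
 *             .dest = port_id_action,     // MLX5_FLOW_ACTION_PORT_ID part
 *     };
 *     struct mlx5dv_dr_action_dest_attr d0 = {
 *             .type = MLX5DV_DR_ACTION_DEST,
 *             .dest = queue_action,       // plain fate destination
 *     };
 *     struct mlx5dv_dr_action_dest_attr d1 = {
 *             .type = MLX5DV_DR_ACTION_DEST_REFORMAT,
 *             .dest_reformat = &ref,      // encap + forward combination
 *     };
 */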
10928
10929 /**
10930  * Find existing destination array resource or create and register a new one.
10931  *
10932  * @param[in, out] dev
10933  *   Pointer to rte_eth_dev structure.
10934  * @param[in] resource
10935  *   Pointer to destination array resource.
10936  * @param[in, out] dev_flow
10937  *   Pointer to the dev_flow.
10938  * @param[out] error
10939  *   Pointer to the error structure.
10940  *
10941  * @return
10942  *   0 on success, otherwise -errno and rte_errno is set.
10943  */
10944 static int
10945 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10946                          struct mlx5_flow_dv_dest_array_resource *resource,
10947                          struct mlx5_flow *dev_flow,
10948                          struct rte_flow_error *error)
10949 {
10950         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10951         struct mlx5_priv *priv = dev->data->dev_private;
10952         struct mlx5_cache_entry *entry;
10953         struct mlx5_flow_cb_ctx ctx = {
10954                 .dev = dev,
10955                 .error = error,
10956                 .data = resource,
10957         };
10958
10959         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10960         if (!entry)
10961                 return -rte_errno;
10962         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10963         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10964         dev_flow->dv.dest_array_res = cache_resource;
10965         return 0;
10966 }
10967
10968 /**
10969  * Convert Sample action to DV specification.
10970  *
10971  * @param[in] dev
10972  *   Pointer to rte_eth_dev structure.
10973  * @param[in] action
10974  *   Pointer to sample action structure.
10975  * @param[in, out] dev_flow
10976  *   Pointer to the mlx5_flow.
10977  * @param[in] attr
10978  *   Pointer to the flow attributes.
10979  * @param[in, out] num_of_dest
10980  *   Pointer to the num of destination.
10981  * @param[in, out] sample_actions
10982  *   Pointer to sample actions list.
10983  * @param[in, out] res
10984  *   Pointer to sample resource.
10985  * @param[out] error
10986  *   Pointer to the error structure.
10987  *
10988  * @return
10989  *   0 on success, a negative errno value otherwise and rte_errno is set.
10990  */
10991 static int
10992 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
10993                                 const struct rte_flow_action_sample *action,
10994                                 struct mlx5_flow *dev_flow,
10995                                 const struct rte_flow_attr *attr,
10996                                 uint32_t *num_of_dest,
10997                                 void **sample_actions,
10998                                 struct mlx5_flow_dv_sample_resource *res,
10999                                 struct rte_flow_error *error)
11000 {
11001         struct mlx5_priv *priv = dev->data->dev_private;
11002         const struct rte_flow_action *sub_actions;
11003         struct mlx5_flow_sub_actions_list *sample_act;
11004         struct mlx5_flow_sub_actions_idx *sample_idx;
11005         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11006         struct rte_flow *flow = dev_flow->flow;
11007         struct mlx5_flow_rss_desc *rss_desc;
11008         uint64_t action_flags = 0;
11009
11010         MLX5_ASSERT(wks);
11011         rss_desc = &wks->rss_desc;
11012         sample_act = &res->sample_act;
11013         sample_idx = &res->sample_idx;
11014         res->ratio = action->ratio;
11015         sub_actions = action->actions;
11016         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11017                 int type = sub_actions->type;
11018                 uint32_t pre_rix = 0;
11019                 void *pre_r;
11020                 switch (type) {
11021                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11022                 {
11023                         const struct rte_flow_action_queue *queue;
11024                         struct mlx5_hrxq *hrxq;
11025                         uint32_t hrxq_idx;
11026
11027                         queue = sub_actions->conf;
11028                         rss_desc->queue_num = 1;
11029                         rss_desc->queue[0] = queue->index;
11030                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11031                                                     rss_desc, &hrxq_idx);
11032                         if (!hrxq)
11033                                 return rte_flow_error_set
11034                                         (error, rte_errno,
11035                                          RTE_FLOW_ERROR_TYPE_ACTION,
11036                                          NULL,
11037                                          "cannot create fate queue");
11038                         sample_act->dr_queue_action = hrxq->action;
11039                         sample_idx->rix_hrxq = hrxq_idx;
11040                         sample_actions[sample_act->actions_num++] =
11041                                                 hrxq->action;
11042                         (*num_of_dest)++;
11043                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11044                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11045                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11046                         dev_flow->handle->fate_action =
11047                                         MLX5_FLOW_FATE_QUEUE;
11048                         break;
11049                 }
11050                 case RTE_FLOW_ACTION_TYPE_RSS:
11051                 {
11052                         struct mlx5_hrxq *hrxq;
11053                         uint32_t hrxq_idx;
11054                         const struct rte_flow_action_rss *rss;
11055                         const uint8_t *rss_key;
11056
11057                         rss = sub_actions->conf;
11058                         memcpy(rss_desc->queue, rss->queue,
11059                                rss->queue_num * sizeof(uint16_t));
11060                         rss_desc->queue_num = rss->queue_num;
11061                         /* NULL RSS key indicates default RSS key. */
11062                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11063                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11064                         /*
11065                          * rss->level and rss->types should be set in advance
11066                          * when expanding items for RSS.
11067                          */
11068                         flow_dv_hashfields_set(dev_flow, rss_desc);
11069                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11070                                                     rss_desc, &hrxq_idx);
11071                         if (!hrxq)
11072                                 return rte_flow_error_set
11073                                         (error, rte_errno,
11074                                          RTE_FLOW_ERROR_TYPE_ACTION,
11075                                          NULL,
11076                                          "cannot create fate queue");
11077                         sample_act->dr_queue_action = hrxq->action;
11078                         sample_idx->rix_hrxq = hrxq_idx;
11079                         sample_actions[sample_act->actions_num++] =
11080                                                 hrxq->action;
11081                         (*num_of_dest)++;
11082                         action_flags |= MLX5_FLOW_ACTION_RSS;
11083                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11084                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11085                         dev_flow->handle->fate_action =
11086                                         MLX5_FLOW_FATE_QUEUE;
11087                         break;
11088                 }
11089                 case RTE_FLOW_ACTION_TYPE_MARK:
11090                 {
11091                         uint32_t tag_be = mlx5_flow_mark_set
11092                                 (((const struct rte_flow_action_mark *)
11093                                 (sub_actions->conf))->id);
11094
11095                         dev_flow->handle->mark = 1;
11096                         pre_rix = dev_flow->handle->dvh.rix_tag;
11097                         /* Save the mark resource before sample */
11098                         pre_r = dev_flow->dv.tag_resource;
11099                         if (flow_dv_tag_resource_register(dev, tag_be,
11100                                                   dev_flow, error))
11101                                 return -rte_errno;
11102                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11103                         sample_act->dr_tag_action =
11104                                 dev_flow->dv.tag_resource->action;
11105                         sample_idx->rix_tag =
11106                                 dev_flow->handle->dvh.rix_tag;
11107                         sample_actions[sample_act->actions_num++] =
11108                                                 sample_act->dr_tag_action;
11109                         /* Recover the mark resource after sample */
11110                         dev_flow->dv.tag_resource = pre_r;
11111                         dev_flow->handle->dvh.rix_tag = pre_rix;
11112                         action_flags |= MLX5_FLOW_ACTION_MARK;
11113                         break;
11114                 }
11115                 case RTE_FLOW_ACTION_TYPE_COUNT:
11116                 {
11117                         if (!flow->counter) {
11118                                 flow->counter =
11119                                         flow_dv_translate_create_counter(dev,
11120                                                 dev_flow, sub_actions->conf,
11121                                                 0);
11122                                 if (!flow->counter)
11123                                         return rte_flow_error_set
11124                                                 (error, rte_errno,
11125                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11126                                                 NULL,
11127                                                 "cannot create counter"
11128                                                 " object.");
11129                         }
11130                         sample_act->dr_cnt_action =
11131                                   (flow_dv_counter_get_by_idx(dev,
11132                                   flow->counter, NULL))->action;
11133                         sample_actions[sample_act->actions_num++] =
11134                                                 sample_act->dr_cnt_action;
11135                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11136                         break;
11137                 }
11138                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11139                 {
11140                         struct mlx5_flow_dv_port_id_action_resource
11141                                         port_id_resource;
11142                         uint32_t port_id = 0;
11143
11144                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11145                         /* Save the port id resource before sample */
11146                         pre_rix = dev_flow->handle->rix_port_id_action;
11147                         pre_r = dev_flow->dv.port_id_action;
11148                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11149                                                              &port_id, error))
11150                                 return -rte_errno;
11151                         port_id_resource.port_id = port_id;
11152                         if (flow_dv_port_id_action_resource_register
11153                             (dev, &port_id_resource, dev_flow, error))
11154                                 return -rte_errno;
11155                         sample_act->dr_port_id_action =
11156                                 dev_flow->dv.port_id_action->action;
11157                         sample_idx->rix_port_id_action =
11158                                 dev_flow->handle->rix_port_id_action;
11159                         sample_actions[sample_act->actions_num++] =
11160                                                 sample_act->dr_port_id_action;
11161                         /* Recover the port id resource after sample */
11162                         dev_flow->dv.port_id_action = pre_r;
11163                         dev_flow->handle->rix_port_id_action = pre_rix;
11164                         (*num_of_dest)++;
11165                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11166                         break;
11167                 }
11168                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11169                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11170                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11171                         /* Save the encap resource before sample */
11172                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11173                         pre_r = dev_flow->dv.encap_decap;
11174                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11175                                                            dev_flow,
11176                                                            attr->transfer,
11177                                                            error))
11178                                 return -rte_errno;
11179                         sample_act->dr_encap_action =
11180                                 dev_flow->dv.encap_decap->action;
11181                         sample_idx->rix_encap_decap =
11182                                 dev_flow->handle->dvh.rix_encap_decap;
11183                         sample_actions[sample_act->actions_num++] =
11184                                                 sample_act->dr_encap_action;
11185                         /* Recover the encap resource after sample */
11186                         dev_flow->dv.encap_decap = pre_r;
11187                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11188                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11189                         break;
11190                 default:
11191                         return rte_flow_error_set(error, EINVAL,
11192                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11193                                 NULL,
11194                                 "unsupported sub-action in sampler");
11195                 }
11196         }
11197         sample_act->action_flags = action_flags;
11198         res->ft_id = dev_flow->dv.group;
11199         if (attr->transfer) {
11200                 union {
11201                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11202                         uint64_t set_action;
11203                 } action_ctx = { .set_action = 0 };
11204
11205                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11206                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11207                          MLX5_MODIFICATION_TYPE_SET);
11208                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11209                          MLX5_MODI_META_REG_C_0);
11210                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11211                          priv->vport_meta_tag);
11212                 res->set_action = action_ctx.set_action;
11213         } else if (attr->ingress) {
11214                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11215         } else {
11216                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11217         }
11218         return 0;
11219 }
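
/*
 * For orientation, a hypothetical application-side action list that
 * reaches this translation: sample 1 of every 2 packets to queue 0.
 * The flow's own fate actions follow the sample action and are not
 * shown here.
 *
 *     struct rte_flow_action_queue q0 = { .index = 0 };
 *     struct rte_flow_action sub_acts[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q0 },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action_sample sample = {
 *             .ratio = 2,          // sample 1 of every 2 packets
 *             .actions = sub_acts, // applied to the sampled copy only
 *     };
 */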
11220
11221 /**
11222  * Create the sample action resource or the destination array for mirroring.
11223  *
11224  * @param[in] dev
11225  *   Pointer to rte_eth_dev structure.
11226  * @param[in, out] dev_flow
11227  *   Pointer to the mlx5_flow.
11228  * @param[in] num_of_dest
11229  *   The num of destination.
11230  * @param[in, out] res
11231  *   Pointer to sample resource.
11232  * @param[in, out] mdest_res
11233  *   Pointer to destination array resource.
11234  * @param[in] sample_actions
11235  *   Pointer to sample path actions list.
11236  * @param[in] action_flags
11237  *   Holds the actions detected until now.
11238  * @param[out] error
11239  *   Pointer to the error structure.
11240  *
11241  * @return
11242  *   0 on success, a negative errno value otherwise and rte_errno is set.
11243  */
11244 static int
11245 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11246                              struct mlx5_flow *dev_flow,
11247                              uint32_t num_of_dest,
11248                              struct mlx5_flow_dv_sample_resource *res,
11249                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11250                              void **sample_actions,
11251                              uint64_t action_flags,
11252                              struct rte_flow_error *error)
11253 {
11254         /* Update the normal path action resource at the last index of the array. */
11255         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11256         struct mlx5_flow_sub_actions_list *sample_act =
11257                                         &mdest_res->sample_act[dest_index];
11258         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11259         struct mlx5_flow_rss_desc *rss_desc;
11260         uint32_t normal_idx = 0;
11261         struct mlx5_hrxq *hrxq;
11262         uint32_t hrxq_idx;
11263
11264         MLX5_ASSERT(wks);
11265         rss_desc = &wks->rss_desc;
11266         if (num_of_dest > 1) {
11267                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11268                         /* Handle QP action for mirroring */
11269                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11270                                                     rss_desc, &hrxq_idx);
11271                         if (!hrxq)
11272                                 return rte_flow_error_set
11273                                      (error, rte_errno,
11274                                       RTE_FLOW_ERROR_TYPE_ACTION,
11275                                       NULL,
11276                                       "cannot create rx queue");
11277                         normal_idx++;
11278                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11279                         sample_act->dr_queue_action = hrxq->action;
11280                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11281                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11282                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11283                 }
11284                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11285                         normal_idx++;
11286                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11287                                 dev_flow->handle->dvh.rix_encap_decap;
11288                         sample_act->dr_encap_action =
11289                                 dev_flow->dv.encap_decap->action;
11290                         dev_flow->handle->dvh.rix_encap_decap = 0;
11291                 }
11292                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11293                         normal_idx++;
11294                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11295                                 dev_flow->handle->rix_port_id_action;
11296                         sample_act->dr_port_id_action =
11297                                 dev_flow->dv.port_id_action->action;
11298                         dev_flow->handle->rix_port_id_action = 0;
11299                 }
11300                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11301                         normal_idx++;
11302                         mdest_res->sample_idx[dest_index].rix_jump =
11303                                 dev_flow->handle->rix_jump;
11304                         sample_act->dr_jump_action =
11305                                 dev_flow->dv.jump->action;
11306                         dev_flow->handle->rix_jump = 0;
11307                 }
11308                 sample_act->actions_num = normal_idx;
11309                 /* Update the sample action resource at the first index of the array. */
11310                 mdest_res->ft_type = res->ft_type;
11311                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11312                                 sizeof(struct mlx5_flow_sub_actions_idx));
11313                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11314                                 sizeof(struct mlx5_flow_sub_actions_list));
11315                 mdest_res->num_of_dest = num_of_dest;
11316                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11317                                                          dev_flow, error))
11318                         return rte_flow_error_set(error, EINVAL,
11319                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11320                                                   NULL, "can't create sample "
11321                                                   "action");
11322         } else {
11323                 res->sub_actions = sample_actions;
11324                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11325                         return rte_flow_error_set(error, EINVAL,
11326                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11327                                                   NULL,
11328                                                   "can't create sample action");
11329         }
11330         return 0;
11331 }
11332
11333 /**
11334  * Remove an ASO age action from age actions list.
11335  *
11336  * @param[in] dev
11337  *   Pointer to the Ethernet device structure.
11338  * @param[in] age
11339  *   Pointer to the aso age action handler.
11340  */
11341 static void
11342 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11343                                 struct mlx5_aso_age_action *age)
11344 {
11345         struct mlx5_age_info *age_info;
11346         struct mlx5_age_param *age_param = &age->age_params;
11347         struct mlx5_priv *priv = dev->data->dev_private;
11348         uint16_t expected = AGE_CANDIDATE;
11349
11350         age_info = GET_PORT_AGE_INFO(priv);
11351         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11352                                          AGE_FREE, false, __ATOMIC_RELAXED,
11353                                          __ATOMIC_RELAXED)) {
11354                 /*
11355                  * We need the lock even if the age timed out,
11356                  * since the age action may still be in process.
11357                  */
11358                 rte_spinlock_lock(&age_info->aged_sl);
11359                 LIST_REMOVE(age, next);
11360                 rte_spinlock_unlock(&age_info->aged_sl);
11361                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11362         }
11363 }
11364
11365 /**
11366  * Release an ASO age action.
11367  *
11368  * @param[in] dev
11369  *   Pointer to the Ethernet device structure.
11370  * @param[in] age_idx
11371  *   Index of ASO age action to release.
11375  *
11376  * @return
11377  *   0 when age action was removed, otherwise the number of references.
11378  */
11379 static int
11380 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11381 {
11382         struct mlx5_priv *priv = dev->data->dev_private;
11383         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11384         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11385         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11386
11387         if (!ret) {
11388                 flow_dv_aso_age_remove_from_age(dev, age);
11389                 rte_spinlock_lock(&mng->free_sl);
11390                 LIST_INSERT_HEAD(&mng->free, age, next);
11391                 rte_spinlock_unlock(&mng->free_sl);
11392         }
11393         return ret;
11394 }
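
/*
 * Lifecycle sketch (hypothetical caller): an age action is allocated
 * once, armed with its parameters, and later released.  The release may
 * race with the aging thread, which is what the compare-and-swap in
 * flow_dv_aso_age_remove_from_age() resolves.
 *
 *     uint32_t age_idx = flow_dv_aso_age_alloc(dev, error); // refcnt = 1
 *     flow_dv_aso_age_params_init(dev, age_idx, ctx, 10);   // 10 seconds
 *     ...
 *     flow_dv_aso_age_release(dev, age_idx); // 0 means fully removed
 */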
11395
11396 /**
11397  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11398  *
11399  * @param[in] dev
11400  *   Pointer to the Ethernet device structure.
11401  *
11402  * @return
11403  *   0 on success, otherwise negative errno value and rte_errno is set.
11404  */
11405 static int
11406 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11407 {
11408         struct mlx5_priv *priv = dev->data->dev_private;
11409         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11410         void *old_pools = mng->pools;
11411         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11412         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11413         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11414
11415         if (!pools) {
11416                 rte_errno = ENOMEM;
11417                 return -ENOMEM;
11418         }
11419         if (old_pools) {
11420                 memcpy(pools, old_pools,
11421                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11422                 mlx5_free(old_pools);
11423         } else {
11424                 /* First ASO flow hit allocation - starting ASO data-path. */
11425                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11426
11427                 if (ret) {
11428                         mlx5_free(pools);
11429                         return ret;
11430                 }
11431         }
11432         mng->n = resize;
11433         mng->pools = pools;
11434         return 0;
11435 }
11436
11437 /**
11438  * Create and initialize a new ASO aging pool.
11439  *
11440  * @param[in] dev
11441  *   Pointer to the Ethernet device structure.
11442  * @param[out] age_free
11443  *   Where to put the pointer of a new age action.
11444  *
11445  * @return
11446  *   The age actions pool pointer and @p age_free is set on success,
11447  *   NULL otherwise and rte_errno is set.
11448  */
11449 static struct mlx5_aso_age_pool *
11450 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11451                         struct mlx5_aso_age_action **age_free)
11452 {
11453         struct mlx5_priv *priv = dev->data->dev_private;
11454         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11455         struct mlx5_aso_age_pool *pool = NULL;
11456         struct mlx5_devx_obj *obj = NULL;
11457         uint32_t i;
11458
11459         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11460                                                     priv->sh->pdn);
11461         if (!obj) {
11462                 rte_errno = ENODATA;
11463                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11464                 return NULL;
11465         }
11466         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11467         if (!pool) {
11468                 claim_zero(mlx5_devx_cmd_destroy(obj));
11469                 rte_errno = ENOMEM;
11470                 return NULL;
11471         }
11472         pool->flow_hit_aso_obj = obj;
11473         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11474         rte_spinlock_lock(&mng->resize_sl);
11475         pool->index = mng->next;
11476         /* Resize pools array if there is no room for the new pool in it. */
11477         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11478                 claim_zero(mlx5_devx_cmd_destroy(obj));
11479                 mlx5_free(pool);
11480                 rte_spinlock_unlock(&mng->resize_sl);
11481                 return NULL;
11482         }
11483         mng->pools[pool->index] = pool;
11484         mng->next++;
11485         rte_spinlock_unlock(&mng->resize_sl);
11486         /* Assign the first action in the new pool, the rest go to the free list. */
11487         *age_free = &pool->actions[0];
11488         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11489                 pool->actions[i].offset = i;
11490                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11491         }
11492         return pool;
11493 }
11494
11495 /**
11496  * Allocate an ASO aging bit.
11497  *
11498  * @param[in] dev
11499  *   Pointer to the Ethernet device structure.
11500  * @param[out] error
11501  *   Pointer to the error structure.
11502  *
11503  * @return
11504  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11505  */
11506 static uint32_t
11507 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11508 {
11509         struct mlx5_priv *priv = dev->data->dev_private;
11510         const struct mlx5_aso_age_pool *pool;
11511         struct mlx5_aso_age_action *age_free = NULL;
11512         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11513
11514         MLX5_ASSERT(mng);
11515         /* Try to get the next free age action bit. */
11516         rte_spinlock_lock(&mng->free_sl);
11517         age_free = LIST_FIRST(&mng->free);
11518         if (age_free) {
11519                 LIST_REMOVE(age_free, next);
11520         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11521                 rte_spinlock_unlock(&mng->free_sl);
11522                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11523                                    NULL, "failed to create ASO age pool");
11524                 return 0; /* 0 is an error. */
11525         }
11526         rte_spinlock_unlock(&mng->free_sl);
11527         pool = container_of
11528           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11529                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11530                                                                        actions);
11531         if (!age_free->dr_action) {
11532                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11533                                                  error);
11534
11535                 if (reg_c < 0) {
11536                         rte_flow_error_set(error, rte_errno,
11537                                            RTE_FLOW_ERROR_TYPE_ACTION,
11538                                            NULL, "failed to get reg_c "
11539                                            "for ASO flow hit");
11540                         return 0; /* 0 is an error. */
11541                 }
11542 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11543                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11544                                 (priv->sh->rx_domain,
11545                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11546                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11547                                  (reg_c - REG_C_0));
11548 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11549                 if (!age_free->dr_action) {
11550                         rte_errno = errno;
11551                         rte_spinlock_lock(&mng->free_sl);
11552                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11553                         rte_spinlock_unlock(&mng->free_sl);
11554                         rte_flow_error_set(error, rte_errno,
11555                                            RTE_FLOW_ERROR_TYPE_ACTION,
11556                                            NULL, "failed to create ASO "
11557                                            "flow hit action");
11558                         return 0; /* 0 is an error. */
11559                 }
11560         }
11561         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11562         return pool->index | ((age_free->offset + 1) << 16);
11563 }
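
/*
 * The returned index packs the pool index into the lower 16 bits and
 * the 1-based action offset above them, so that 0 can be reserved as
 * the error value.  Decoding sketch (the driver uses
 * flow_aso_age_get_by_idx() for this; the names below are hypothetical):
 *
 *     uint32_t pool_idx = age_idx & UINT16_MAX; // lower 16 bits
 *     uint32_t offset = (age_idx >> 16) - 1;    // undo the +1 bias
 *     struct mlx5_aso_age_action *age =
 *             &mng->pools[pool_idx]->actions[offset];
 */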
11564
11565 /**
11566  * Initialize flow ASO age parameters.
11567  *
11568  * @param[in] dev
11569  *   Pointer to rte_eth_dev structure.
11570  * @param[in] age_idx
11571  *   Index of ASO age action.
11572  * @param[in] context
11573  *   Pointer to flow counter age context.
11574  * @param[in] timeout
11575  *   Aging timeout in seconds.
11577  */
11578 static void
11579 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11580                             uint32_t age_idx,
11581                             void *context,
11582                             uint32_t timeout)
11583 {
11584         struct mlx5_aso_age_action *aso_age;
11585
11586         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11587         MLX5_ASSERT(aso_age);
11588         aso_age->age_params.context = context;
11589         aso_age->age_params.timeout = timeout;
11590         aso_age->age_params.port_id = dev->data->port_id;
11591         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11592                          __ATOMIC_RELAXED);
11593         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11594                          __ATOMIC_RELAXED);
11595 }
11596
11597 static void
11598 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11599                                const struct rte_flow_item_integrity *value,
11600                                void *headers_m, void *headers_v)
11601 {
11602         if (mask->l4_ok) {
11603                 /* application l4_ok filter aggregates all hardware l4 filters,
11604                  * therefore hw l4_checksum_ok must be implicitly added here.
11605                  */
11606                 struct rte_flow_item_integrity local_item;
11607
11608                 local_item.l4_csum_ok = 1;
11609                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11610                          local_item.l4_csum_ok);
11611                 if (value->l4_ok) {
11612                         /* application l4_ok = 1 match sets both hw
11613                          * flags, l4_ok and l4_checksum_ok, to 1.
11614                          */
11615                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11616                                  l4_checksum_ok, local_item.l4_csum_ok);
11617                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11618                                  mask->l4_ok);
11619                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11620                                  value->l4_ok);
11621                 } else {
11622                         /* application l4_ok = 0 matches on hw flag
11623                          * l4_checksum_ok = 0 only.
11624                          */
11625                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11626                                  l4_checksum_ok, 0);
11627                 }
11628         } else if (mask->l4_csum_ok) {
11629                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11630                          mask->l4_csum_ok);
11631                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
11632                          value->l4_csum_ok);
11633         }
11634 }
11635
11636 static void
11637 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
11638                                const struct rte_flow_item_integrity *value,
11639                                void *headers_m, void *headers_v,
11640                                bool is_ipv4)
11641 {
11642         if (mask->l3_ok) {
11643                 /* application l3_ok filter aggregates all hardware l3 filters,
11644                  * therefore hw ipv4_checksum_ok must be implicitly added here.
11645                  */
11646                 struct rte_flow_item_integrity local_item;
11647
11648                 local_item.ipv4_csum_ok = !!is_ipv4;
11649                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11650                          local_item.ipv4_csum_ok);
11651                 if (value->l3_ok) {
11652                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11653                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
11654                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
11655                                  mask->l3_ok);
11656                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
11657                                  value->l3_ok);
11658                 } else {
11659                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11660                                  ipv4_checksum_ok, 0);
11661                 }
11662         } else if (mask->ipv4_csum_ok) {
11663                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11664                          mask->ipv4_csum_ok);
11665                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11666                          value->ipv4_csum_ok);
11667         }
11668 }
11669
11670 static void
11671 flow_dv_translate_item_integrity(void *matcher, void *key,
11672                                  const struct rte_flow_item *head_item,
11673                                  const struct rte_flow_item *integrity_item)
11674 {
11675         const struct rte_flow_item_integrity *mask = integrity_item->mask;
11676         const struct rte_flow_item_integrity *value = integrity_item->spec;
11677         const struct rte_flow_item *tunnel_item, *end_item, *item;
11678         void *headers_m;
11679         void *headers_v;
11680         uint32_t l3_protocol;
11681
11682         if (!value)
11683                 return;
11684         if (!mask)
11685                 mask = &rte_flow_item_integrity_mask;
11686         if (value->level > 1) {
11687                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11688                                          inner_headers);
11689                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
11690         } else {
11691                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11692                                          outer_headers);
11693                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
11694         }
11695         tunnel_item = mlx5_flow_find_tunnel_item(head_item);
11696         if (value->level > 1) {
11697                 /* tunnel item was verified during the item validation */
11698                 item = tunnel_item;
11699                 end_item = mlx5_find_end_item(tunnel_item);
11700         } else {
11701                 item = head_item;
11702                 end_item = tunnel_item ? tunnel_item :
11703                            mlx5_find_end_item(integrity_item);
11704         }
11705         l3_protocol = mask->l3_ok ?
11706                       mlx5_flow_locate_proto_l3(&item, end_item) : 0;
11707         flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
11708                                        l3_protocol == RTE_ETHER_TYPE_IPV4);
11709         flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
11710 }
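
/*
 * Application-side sketch of an integrity item that lands here: match
 * packets whose inner (level 2) L3 and L4 checks all pass.  As
 * implemented above, l4_ok = 1 expands to both hw l4_ok and
 * l4_checksum_ok.
 *
 *     struct rte_flow_item_integrity spec = {
 *             .level = 2, // inner headers of tunnelled traffic
 *             .l3_ok = 1,
 *             .l4_ok = 1,
 *     };
 *     struct rte_flow_item_integrity mask = { .l3_ok = 1, .l4_ok = 1 };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
 *             .spec = &spec, .mask = &mask,
 *     };
 */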
11711
11712 /**
11713  * Prepares DV flow counter with aging configuration.
11714  * Gets it by index when exists, creates a new one when doesn't.
11715  *
11716  * @param[in] dev
11717  *   Pointer to rte_eth_dev structure.
11718  * @param[in] dev_flow
11719  *   Pointer to the mlx5_flow.
11720  * @param[in, out] flow
11721  *   Pointer to the parent rte_flow.
11722  * @param[in] count
11723  *   Pointer to the counter action configuration.
11724  * @param[in] age
11725  *   Pointer to the aging action configuration.
11726  * @param[out] error
11727  *   Pointer to the error structure.
11728  *
11729  * @return
11730  *   Pointer to the counter, NULL otherwise.
11731  */
11732 static struct mlx5_flow_counter *
11733 flow_dv_prepare_counter(struct rte_eth_dev *dev,
11734                         struct mlx5_flow *dev_flow,
11735                         struct rte_flow *flow,
11736                         const struct rte_flow_action_count *count,
11737                         const struct rte_flow_action_age *age,
11738                         struct rte_flow_error *error)
11739 {
11740         if (!flow->counter) {
11741                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
11742                                                                  count, age);
11743                 if (!flow->counter) {
11744                         rte_flow_error_set(error, rte_errno,
11745                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11746                                            "cannot create counter object.");
11747                         return NULL;
11748                 }
11749         }
11750         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11751 }
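
/*
 * Hypothetical caller sketch: COUNT and AGE share one counter object
 * per rte_flow; the first call creates it, later calls for the same
 * flow simply resolve the existing index.
 *
 *     struct rte_flow_action_age age = { .timeout = 10 }; // seconds
 *     struct mlx5_flow_counter *cnt =
 *             flow_dv_prepare_counter(dev, dev_flow, flow, NULL, &age,
 *                                     error);
 *     if (!cnt)
 *             return -rte_errno;
 */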
11752
11753 /*
11754  * Release an ASO CT action via its owner device.
11755  *
11756  * @param[in] dev
11757  *   Pointer to the Ethernet device structure.
11758  * @param[in] idx
11759  *   Index of ASO CT action to release.
11760  *
11761  * @return
11762  *   0 when CT action was removed, otherwise the number of references.
11763  */
11764 static inline int
11765 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
11766 {
11767         struct mlx5_priv *priv = dev->data->dev_private;
11768         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11769         uint32_t ret;
11770         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
11771         enum mlx5_aso_ct_state state =
11772                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
11773
11774         /* Cannot release when CT is in the ASO SQ. */
11775         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
11776                 return -1;
11777         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
11778         if (!ret) {
11779                 if (ct->dr_action_orig) {
11780 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11781                         claim_zero(mlx5_glue->destroy_flow_action
11782                                         (ct->dr_action_orig));
11783 #endif
11784                         ct->dr_action_orig = NULL;
11785                 }
11786                 if (ct->dr_action_rply) {
11787 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11788                         claim_zero(mlx5_glue->destroy_flow_action
11789                                         (ct->dr_action_rply));
11790 #endif
11791                         ct->dr_action_rply = NULL;
11792                 }
11793                 /* Clear the state to free; not needed on the first allocation. */
11794                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
11795                 rte_spinlock_lock(&mng->ct_sl);
11796                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
11797                 rte_spinlock_unlock(&mng->ct_sl);
11798         }
11799         return (int)ret;
11800 }
11801
11802 static inline int
11803 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
11804 {
11805         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
11806         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
11807         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
11809
11810         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
11811         if (dev->data->dev_started != 1)
11812                 return -1;
11813         return flow_dv_aso_ct_dev_release(owndev, idx);
11814 }
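
/*
 * Indirect CT action handles embed the owner port, so a flow created on
 * a peer port can still release the action on the port that owns the
 * ASO object.  Decomposition sketch using the macros from above:
 *
 *     uint16_t owner = MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
 *     uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
 *     // 'owner' indexes rte_eth_devices[]; 'idx' is the per-port
 *     // pool/offset combination produced by MLX5_MAKE_CT_IDX().
 */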
11815
11816 /*
11817  * Resize the ASO CT pools array by 64 pools.
11818  *
11819  * @param[in] dev
11820  *   Pointer to the Ethernet device structure.
11821  *
11822  * @return
11823  *   0 on success, otherwise negative errno value and rte_errno is set.
11824  */
11825 static int
11826 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
11827 {
11828         struct mlx5_priv *priv = dev->data->dev_private;
11829         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11830         void *old_pools = mng->pools;
11831         /* Magic number for now; this should become a macro. */
11832         uint32_t resize = mng->n + 64;
11833         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
11834         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11835
11836         if (!pools) {
11837                 rte_errno = ENOMEM;
11838                 return -rte_errno;
11839         }
11840         rte_rwlock_write_lock(&mng->resize_rwl);
11841         /* ASO SQ/QP was already initialized during startup. */
11842         if (old_pools) {
11843                 /* Realloc could be an alternative choice. */
11844                 rte_memcpy(pools, old_pools,
11845                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
11846                 mlx5_free(old_pools);
11847         }
11848         mng->n = resize;
11849         mng->pools = pools;
11850         rte_rwlock_write_unlock(&mng->resize_rwl);
11851         return 0;
11852 }
11853
11854 /*
11855  * Create and initialize a new ASO CT pool.
11856  *
11857  * @param[in] dev
11858  *   Pointer to the Ethernet device structure.
11859  * @param[out] ct_free
11860  *   Where to put the pointer of a new CT action.
11861  *
11862  * @return
11863  *   The CT actions pool pointer and @p ct_free is set on success,
11864  *   NULL otherwise and rte_errno is set.
11865  */
11866 static struct mlx5_aso_ct_pool *
11867 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
11868                        struct mlx5_aso_ct_action **ct_free)
11869 {
11870         struct mlx5_priv *priv = dev->data->dev_private;
11871         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11872         struct mlx5_aso_ct_pool *pool = NULL;
11873         struct mlx5_devx_obj *obj = NULL;
11874         uint32_t i;
11875         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
11876
11877         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
11878                                                 priv->sh->pdn, log_obj_size);
11879         if (!obj) {
11880                 rte_errno = ENODATA;
11881                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
11882                 return NULL;
11883         }
11884         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11885         if (!pool) {
11886                 rte_errno = ENOMEM;
11887                 claim_zero(mlx5_devx_cmd_destroy(obj));
11888                 return NULL;
11889         }
11890         pool->devx_obj = obj;
11891         pool->index = mng->next;
11892         /* Resize pools array if there is no room for the new pool in it. */
11893         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
11894                 claim_zero(mlx5_devx_cmd_destroy(obj));
11895                 mlx5_free(pool);
11896                 return NULL;
11897         }
11898         mng->pools[pool->index] = pool;
11899         mng->next++;
11900         /* Assign the first action in the new pool, the rest go to free list. */
11901         *ct_free = &pool->actions[0];
11902         /* The lock is held by the caller, so the list operation is safe here. */
11903         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
11904                 /* refcnt is already 0, the pool memory was zeroed on allocation. */
11905                 pool->actions[i].offset = i;
11906                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
11907         }
11908         return pool;
11909 }
11910
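/*
 * A minimal sketch of the hand-out convention used by the pool creation
 * above: entry 0 of a new pool immediately satisfies the pending
 * allocation and entries 1..n-1 go to the shared free list, so the
 * caller never has to retry. Reuses the illustrative sketch_entry types
 * defined earlier.
 */
static __rte_unused struct sketch_entry *
sketch_pool_publish(struct sketch_head *free_list,
                    struct sketch_entry entries[], uint32_t n)
{
        uint32_t i;

        for (i = 1; i < n; i++)
                LIST_INSERT_HEAD(free_list, &entries[i], next);
        return &entries[0]; /* Entry 0 goes straight to the requester. */
}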
11911 /*
11912  * Allocate an ASO CT action from the free list.
11913  *
11914  * @param[in] dev
11915  *   Pointer to the Ethernet device structure.
11916  * @param[out] error
11917  *   Pointer to the error structure.
11918  *
11919  * @return
11920  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
11921  */
11922 static uint32_t
11923 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11924 {
11925         struct mlx5_priv *priv = dev->data->dev_private;
11926         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11927         struct mlx5_aso_ct_action *ct = NULL;
11928         struct mlx5_aso_ct_pool *pool;
11929         uint8_t reg_c;
11930         uint32_t ct_idx;
11931
11932         MLX5_ASSERT(mng);
11933         if (!priv->config.devx) {
11934                 rte_errno = ENOTSUP;
11935                 return 0;
11936         }
11937         /* Get a free CT action; if none is available, a new pool is created. */
11938         rte_spinlock_lock(&mng->ct_sl);
11939         ct = LIST_FIRST(&mng->free_cts);
11940         if (ct) {
11941                 LIST_REMOVE(ct, next);
11942         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
11943                 rte_spinlock_unlock(&mng->ct_sl);
11944                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11945                                    NULL, "failed to create ASO CT pool");
11946                 return 0;
11947         }
11948         rte_spinlock_unlock(&mng->ct_sl);
11949         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
11950         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
11951         /* 0: inactive, 1: created, 2+: used by flows. */
11952         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
11953         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
11954         if (!ct->dr_action_orig) {
11955 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11956                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
11957                         (priv->sh->rx_domain, pool->devx_obj->obj,
11958                          ct->offset,
11959                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
11960                          reg_c - REG_C_0);
11961 #else
11962                 RTE_SET_USED(reg_c);
11963 #endif
11964                 if (!ct->dr_action_orig) {
11965                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11966                         rte_flow_error_set(error, rte_errno,
11967                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11968                                            "failed to create ASO CT action");
11969                         return 0;
11970                 }
11971         }
11972         if (!ct->dr_action_rply) {
11973 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11974                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
11975                         (priv->sh->rx_domain, pool->devx_obj->obj,
11976                          ct->offset,
11977                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
11978                          reg_c - REG_C_0);
11979 #endif
11980                 if (!ct->dr_action_rply) {
11981                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11982                         rte_flow_error_set(error, rte_errno,
11983                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11984                                            "failed to create ASO CT action");
11985                         return 0;
11986                 }
11987         }
11988         return ct_idx;
11989 }
11990
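/*
 * flow_dv_aso_ct_alloc() above recovers the owning pool from a free-list
 * element with container_of(), relying on the element knowing its own
 * offset inside the pool's actions[] array. A minimal sketch with
 * hypothetical types (the array size 8 is an arbitrary example):
 */
struct sketch_pool {
        uint32_t index;
        struct sketch_entry entries[8];
};

static __rte_unused struct sketch_pool *
sketch_pool_of(struct sketch_entry *e, uint32_t offset)
{
        /* e == &pool->entries[offset]; subtracting the member offset of
         * entries[offset] yields the pool base address.
         */
        return container_of(e, struct sketch_pool, entries[offset]);
}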
11991 /*
11992  * Create a conntrack object with context and actions by using ASO mechanism.
11993  *
11994  * @param[in] dev
11995  *   Pointer to rte_eth_dev structure.
11996  * @param[in] pro
11997  *   Pointer to conntrack information profile.
11998  * @param[out] error
11999  *   Pointer to the error structure.
12000  *
12001  * @return
12002  *   Index to conntrack object on success, 0 otherwise.
12003  */
12004 static uint32_t
12005 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12006                                    const struct rte_flow_action_conntrack *pro,
12007                                    struct rte_flow_error *error)
12008 {
12009         struct mlx5_priv *priv = dev->data->dev_private;
12010         struct mlx5_dev_ctx_shared *sh = priv->sh;
12011         struct mlx5_aso_ct_action *ct;
12012         uint32_t idx;
12013
12014         if (!sh->ct_aso_en)
12015                 return rte_flow_error_set(error, ENOTSUP,
12016                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12017                                           "Connection tracking is not supported");
12018         idx = flow_dv_aso_ct_alloc(dev, error);
12019         if (!idx)
12020                 return rte_flow_error_set(error, rte_errno,
12021                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12022                                           "Failed to allocate CT object");
12023         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12024         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12025                 return rte_flow_error_set(error, EBUSY,
12026                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12027                                           "Failed to update CT");
12028         ct->is_original = !!pro->is_original_dir;
12029         ct->peer = pro->peer_port;
12030         return idx;
12031 }
12032
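/*
 * A minimal sketch of the caller's side of the function above: the
 * application fills a conntrack profile and passes it through the
 * indirect action API. Only the two fields consumed above are shown;
 * the values are arbitrary examples.
 */
static __rte_unused void
sketch_ct_profile_fill(struct rte_flow_action_conntrack *pro)
{
        memset(pro, 0, sizeof(*pro));
        pro->peer_port = 0;       /* Peer port of the connection. */
        pro->is_original_dir = 1; /* Flow matches the original direction. */
}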
12033 /**
12034  * Fill the flow with DV spec, lock-free
12035  * (the required mutex must be acquired by the caller).
12036  *
12037  * @param[in] dev
12038  *   Pointer to rte_eth_dev structure.
12039  * @param[in, out] dev_flow
12040  *   Pointer to the sub flow.
12041  * @param[in] attr
12042  *   Pointer to the flow attributes.
12043  * @param[in] items
12044  *   Pointer to the list of items.
12045  * @param[in] actions
12046  *   Pointer to the list of actions.
12047  * @param[out] error
12048  *   Pointer to the error structure.
12049  *
12050  * @return
12051  *   0 on success, a negative errno value otherwise and rte_errno is set.
12052  */
12053 static int
12054 flow_dv_translate(struct rte_eth_dev *dev,
12055                   struct mlx5_flow *dev_flow,
12056                   const struct rte_flow_attr *attr,
12057                   const struct rte_flow_item items[],
12058                   const struct rte_flow_action actions[],
12059                   struct rte_flow_error *error)
12060 {
12061         struct mlx5_priv *priv = dev->data->dev_private;
12062         struct mlx5_dev_config *dev_conf = &priv->config;
12063         struct rte_flow *flow = dev_flow->flow;
12064         struct mlx5_flow_handle *handle = dev_flow->handle;
12065         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12066         struct mlx5_flow_rss_desc *rss_desc;
12067         uint64_t item_flags = 0;
12068         uint64_t last_item = 0;
12069         uint64_t action_flags = 0;
12070         struct mlx5_flow_dv_matcher matcher = {
12071                 .mask = {
12072                         .size = sizeof(matcher.mask.buf) -
12073                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
12074                 },
12075         };
12076         int actions_n = 0;
12077         bool actions_end = false;
12078         union {
12079                 struct mlx5_flow_dv_modify_hdr_resource res;
12080                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12081                             sizeof(struct mlx5_modification_cmd) *
12082                             (MLX5_MAX_MODIFY_NUM + 1)];
12083         } mhdr_dummy;
12084         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12085         const struct rte_flow_action_count *count = NULL;
12086         const struct rte_flow_action_age *non_shared_age = NULL;
12087         union flow_dv_attr flow_attr = { .attr = 0 };
12088         uint32_t tag_be;
12089         union mlx5_flow_tbl_key tbl_key;
12090         uint32_t modify_action_position = UINT32_MAX;
12091         void *match_mask = matcher.mask.buf;
12092         void *match_value = dev_flow->dv.value.buf;
12093         uint8_t next_protocol = 0xff;
12094         struct rte_vlan_hdr vlan = { 0 };
12095         struct mlx5_flow_dv_dest_array_resource mdest_res;
12096         struct mlx5_flow_dv_sample_resource sample_res;
12097         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12098         const struct rte_flow_action_sample *sample = NULL;
12099         struct mlx5_flow_sub_actions_list *sample_act;
12100         uint32_t sample_act_pos = UINT32_MAX;
12101         uint32_t age_act_pos = UINT32_MAX;
12102         uint32_t num_of_dest = 0;
12103         int tmp_actions_n = 0;
12104         uint32_t table;
12105         int ret = 0;
12106         const struct mlx5_flow_tunnel *tunnel = NULL;
12107         struct flow_grp_info grp_info = {
12108                 .external = !!dev_flow->external,
12109                 .transfer = !!attr->transfer,
12110                 .fdb_def_rule = !!priv->fdb_def_rule,
12111                 .skip_scale = dev_flow->skip_scale &
12112                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12113                 .std_tbl_fix = true,
12114         };
12115         const struct rte_flow_item *head_item = items;
12116
12117         if (!wks)
12118                 return rte_flow_error_set(error, ENOMEM,
12119                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12120                                           NULL,
12121                                           "failed to push flow workspace");
12122         rss_desc = &wks->rss_desc;
12123         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12124         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12125         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12126                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12127         /* Keep the normal path action resource in the last index of the array. */
12128         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12129         if (is_tunnel_offload_active(dev)) {
12130                 if (dev_flow->tunnel) {
12131                         RTE_VERIFY(dev_flow->tof_type ==
12132                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12133                         tunnel = dev_flow->tunnel;
12134                 } else {
12135                         tunnel = mlx5_get_tof(items, actions,
12136                                               &dev_flow->tof_type);
12137                         dev_flow->tunnel = tunnel;
12138                 }
12139                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12140                                         (dev, attr, tunnel, dev_flow->tof_type);
12141         }
12144         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12145                                        &grp_info, error);
12146         if (ret)
12147                 return ret;
12148         dev_flow->dv.group = table;
12149         if (attr->transfer)
12150                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12151         /* The number of actions must be reset to 0 in case the stack is dirty. */
12152         mhdr_res->actions_num = 0;
12153         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12154                 /*
12155                  * Do not add a decap action if the match rule drops the
12156                  * packet: HW rejects rules combining decap and drop.
12157                  *
12158                  * If a tunnel match rule was inserted before the matching
12159                  * tunnel set rule, the flow table used in the match rule
12160                  * must be registered; the current implementation handles
12161                  * that in flow_dv_match_register() at the function end.
12162                  */
12163                 bool add_decap = true;
12164                 const struct rte_flow_action *ptr = actions;
12165
12166                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12167                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12168                                 add_decap = false;
12169                                 break;
12170                         }
12171                 }
12172                 if (add_decap) {
12173                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12174                                                            attr->transfer,
12175                                                            error))
12176                                 return -rte_errno;
12177                         dev_flow->dv.actions[actions_n++] =
12178                                         dev_flow->dv.encap_decap->action;
12179                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12180                 }
12181         }
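        /*
         * The loop below walks the action list once, translating each
         * rte_flow action into DV actions in dev_flow->dv.actions[] and
         * accumulating MLX5_FLOW_ACTION_* bits in action_flags.
         * Modify-header commands are only collected in mhdr_res here and
         * are registered as a single DV action when END is reached.
         */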
12182         for (; !actions_end ; actions++) {
12183                 const struct rte_flow_action_queue *queue;
12184                 const struct rte_flow_action_rss *rss;
12185                 const struct rte_flow_action *action = actions;
12186                 const uint8_t *rss_key;
12187                 struct mlx5_flow_tbl_resource *tbl;
12188                 struct mlx5_aso_age_action *age_act;
12189                 struct mlx5_flow_counter *cnt_act;
12190                 uint32_t port_id = 0;
12191                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12192                 int action_type = actions->type;
12193                 const struct rte_flow_action *found_action = NULL;
12194                 uint32_t jump_group = 0;
12195                 uint32_t owner_idx;
12196                 struct mlx5_aso_ct_action *ct;
12197
12198                 if (!mlx5_flow_os_action_supported(action_type))
12199                         return rte_flow_error_set(error, ENOTSUP,
12200                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12201                                                   actions,
12202                                                   "action not supported");
12203                 switch (action_type) {
12204                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12205                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12206                         break;
12207                 case RTE_FLOW_ACTION_TYPE_VOID:
12208                         break;
12209                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12210                         if (flow_dv_translate_action_port_id(dev, action,
12211                                                              &port_id, error))
12212                                 return -rte_errno;
12213                         port_id_resource.port_id = port_id;
12214                         MLX5_ASSERT(!handle->rix_port_id_action);
12215                         if (flow_dv_port_id_action_resource_register
12216                             (dev, &port_id_resource, dev_flow, error))
12217                                 return -rte_errno;
12218                         dev_flow->dv.actions[actions_n++] =
12219                                         dev_flow->dv.port_id_action->action;
12220                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12221                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12222                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12223                         num_of_dest++;
12224                         break;
12225                 case RTE_FLOW_ACTION_TYPE_FLAG:
12226                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12227                         dev_flow->handle->mark = 1;
12228                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12229                                 struct rte_flow_action_mark mark = {
12230                                         .id = MLX5_FLOW_MARK_DEFAULT,
12231                                 };
12232
12233                                 if (flow_dv_convert_action_mark(dev, &mark,
12234                                                                 mhdr_res,
12235                                                                 error))
12236                                         return -rte_errno;
12237                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12238                                 break;
12239                         }
12240                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12241                         /*
12242                          * Only one FLAG or MARK is supported per device flow
12243                          * right now, so the pointer to the tag resource must be
12244                          * zero before registration.
12245                          */
12246                         MLX5_ASSERT(!handle->dvh.rix_tag);
12247                         if (flow_dv_tag_resource_register(dev, tag_be,
12248                                                           dev_flow, error))
12249                                 return -rte_errno;
12250                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12251                         dev_flow->dv.actions[actions_n++] =
12252                                         dev_flow->dv.tag_resource->action;
12253                         break;
12254                 case RTE_FLOW_ACTION_TYPE_MARK:
12255                         action_flags |= MLX5_FLOW_ACTION_MARK;
12256                         dev_flow->handle->mark = 1;
12257                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12258                                 const struct rte_flow_action_mark *mark =
12259                                         (const struct rte_flow_action_mark *)
12260                                                 actions->conf;
12261
12262                                 if (flow_dv_convert_action_mark(dev, mark,
12263                                                                 mhdr_res,
12264                                                                 error))
12265                                         return -rte_errno;
12266                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12267                                 break;
12268                         }
12269                         /* Fall-through */
12270                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12271                         /* Legacy (non-extensive) MARK action. */
12272                         tag_be = mlx5_flow_mark_set
12273                               (((const struct rte_flow_action_mark *)
12274                                (actions->conf))->id);
12275                         MLX5_ASSERT(!handle->dvh.rix_tag);
12276                         if (flow_dv_tag_resource_register(dev, tag_be,
12277                                                           dev_flow, error))
12278                                 return -rte_errno;
12279                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12280                         dev_flow->dv.actions[actions_n++] =
12281                                         dev_flow->dv.tag_resource->action;
12282                         break;
12283                 case RTE_FLOW_ACTION_TYPE_SET_META:
12284                         if (flow_dv_convert_action_set_meta
12285                                 (dev, mhdr_res, attr,
12286                                  (const struct rte_flow_action_set_meta *)
12287                                   actions->conf, error))
12288                                 return -rte_errno;
12289                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12290                         break;
12291                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12292                         if (flow_dv_convert_action_set_tag
12293                                 (dev, mhdr_res,
12294                                  (const struct rte_flow_action_set_tag *)
12295                                   actions->conf, error))
12296                                 return -rte_errno;
12297                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12298                         break;
12299                 case RTE_FLOW_ACTION_TYPE_DROP:
12300                         action_flags |= MLX5_FLOW_ACTION_DROP;
12301                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12302                         break;
12303                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12304                         queue = actions->conf;
12305                         rss_desc->queue_num = 1;
12306                         rss_desc->queue[0] = queue->index;
12307                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12308                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12309                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12310                         num_of_dest++;
12311                         break;
12312                 case RTE_FLOW_ACTION_TYPE_RSS:
12313                         rss = actions->conf;
12314                         memcpy(rss_desc->queue, rss->queue,
12315                                rss->queue_num * sizeof(uint16_t));
12316                         rss_desc->queue_num = rss->queue_num;
12317                         /* NULL RSS key indicates default RSS key. */
12318                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12319                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12320                         /*
12321                          * rss->level and rss->types should be set in advance
12322                          * when expanding items for RSS.
12323                          */
12324                         action_flags |= MLX5_FLOW_ACTION_RSS;
12325                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12326                                 MLX5_FLOW_FATE_SHARED_RSS :
12327                                 MLX5_FLOW_FATE_QUEUE;
12328                         break;
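                /*
                 * The two AGE variants below differ in ownership: the
                 * internal MLX5_RTE_* type references an ASO age object
                 * created via the indirect action API (only a reference
                 * is taken here), while the plain RTE type defers the
                 * age object creation to the END action.
                 */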
12329                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12330                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12331                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12332                         __atomic_fetch_add(&age_act->refcnt, 1,
12333                                            __ATOMIC_RELAXED);
12334                         age_act_pos = actions_n++;
12335                         action_flags |= MLX5_FLOW_ACTION_AGE;
12336                         break;
12337                 case RTE_FLOW_ACTION_TYPE_AGE:
12338                         non_shared_age = action->conf;
12339                         age_act_pos = actions_n++;
12340                         action_flags |= MLX5_FLOW_ACTION_AGE;
12341                         break;
12342                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12343                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12344                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12345                                                              NULL);
12346                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12347                                            __ATOMIC_RELAXED);
12348                         /* Save the information first; it is applied later. */
12349                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12350                         break;
12351                 case RTE_FLOW_ACTION_TYPE_COUNT:
12352                         if (!dev_conf->devx) {
12353                                 return rte_flow_error_set
12354                                               (error, ENOTSUP,
12355                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12356                                                NULL,
12357                                                "count action not supported");
12358                         }
12359                         /* Save the information first; it is applied later. */
12360                         count = action->conf;
12361                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12362                         break;
12363                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12364                         dev_flow->dv.actions[actions_n++] =
12365                                                 priv->sh->pop_vlan_action;
12366                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12367                         break;
12368                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12369                         if (!(action_flags &
12370                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12371                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12372                         vlan.eth_proto = rte_be_to_cpu_16
12373                              ((((const struct rte_flow_action_of_push_vlan *)
12374                                                    actions->conf)->ethertype));
12375                         found_action = mlx5_flow_find_action
12376                                         (actions + 1,
12377                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12378                         if (found_action)
12379                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12380                         found_action = mlx5_flow_find_action
12381                                         (actions + 1,
12382                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12383                         if (found_action)
12384                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12385                         if (flow_dv_create_action_push_vlan
12386                                             (dev, attr, &vlan, dev_flow, error))
12387                                 return -rte_errno;
12388                         dev_flow->dv.actions[actions_n++] =
12389                                         dev_flow->dv.push_vlan_res->action;
12390                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12391                         break;
12392                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12393                         /* The OF_PUSH_VLAN action has already handled this action. */
12394                         MLX5_ASSERT(action_flags &
12395                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12396                         break;
12397                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12398                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12399                                 break;
12400                         flow_dev_get_vlan_info_from_items(items, &vlan);
12401                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12402                         /* Without a preceding VLAN push this is a modify header action. */
12403                         if (flow_dv_convert_action_modify_vlan_vid
12404                                                 (mhdr_res, actions, error))
12405                                 return -rte_errno;
12406                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12407                         break;
12408                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12409                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12410                         if (flow_dv_create_action_l2_encap(dev, actions,
12411                                                            dev_flow,
12412                                                            attr->transfer,
12413                                                            error))
12414                                 return -rte_errno;
12415                         dev_flow->dv.actions[actions_n++] =
12416                                         dev_flow->dv.encap_decap->action;
12417                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12418                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12419                                 sample_act->action_flags |=
12420                                                         MLX5_FLOW_ACTION_ENCAP;
12421                         break;
12422                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12423                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12424                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12425                                                            attr->transfer,
12426                                                            error))
12427                                 return -rte_errno;
12428                         dev_flow->dv.actions[actions_n++] =
12429                                         dev_flow->dv.encap_decap->action;
12430                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12431                         break;
12432                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12433                         /* Handle encap with preceding decap. */
12434                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12435                                 if (flow_dv_create_action_raw_encap
12436                                         (dev, actions, dev_flow, attr, error))
12437                                         return -rte_errno;
12438                                 dev_flow->dv.actions[actions_n++] =
12439                                         dev_flow->dv.encap_decap->action;
12440                         } else {
12441                                 /* Handle encap without preceding decap. */
12442                                 if (flow_dv_create_action_l2_encap
12443                                     (dev, actions, dev_flow, attr->transfer,
12444                                      error))
12445                                         return -rte_errno;
12446                                 dev_flow->dv.actions[actions_n++] =
12447                                         dev_flow->dv.encap_decap->action;
12448                         }
12449                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12450                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12451                                 sample_act->action_flags |=
12452                                                         MLX5_FLOW_ACTION_ENCAP;
12453                         break;
12454                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12455                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12456                                 ;
12457                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12458                                 if (flow_dv_create_action_l2_decap
12459                                     (dev, dev_flow, attr->transfer, error))
12460                                         return -rte_errno;
12461                                 dev_flow->dv.actions[actions_n++] =
12462                                         dev_flow->dv.encap_decap->action;
12463                         }
12464                         /* If decap is followed by encap, handle it at encap. */
12465                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12466                         break;
12467                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12468                         dev_flow->dv.actions[actions_n++] =
12469                                 (void *)(uintptr_t)action->conf;
12470                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12471                         break;
12472                 case RTE_FLOW_ACTION_TYPE_JUMP:
12473                         jump_group = ((const struct rte_flow_action_jump *)
12474                                                         action->conf)->group;
12475                         grp_info.std_tbl_fix = 0;
12476                         if (dev_flow->skip_scale &
12477                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12478                                 grp_info.skip_scale = 1;
12479                         else
12480                                 grp_info.skip_scale = 0;
12481                         ret = mlx5_flow_group_to_table(dev, tunnel,
12482                                                        jump_group,
12483                                                        &table,
12484                                                        &grp_info, error);
12485                         if (ret)
12486                                 return ret;
12487                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12488                                                        attr->transfer,
12489                                                        !!dev_flow->external,
12490                                                        tunnel, jump_group, 0,
12491                                                        0, error);
12492                         if (!tbl)
12493                                 return rte_flow_error_set
12494                                                 (error, errno,
12495                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12496                                                  NULL,
12497                                                  "cannot create jump action.");
12498                         if (flow_dv_jump_tbl_resource_register
12499                             (dev, tbl, dev_flow, error)) {
12500                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12501                                 return rte_flow_error_set
12502                                                 (error, errno,
12503                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12504                                                  NULL,
12505                                                  "cannot create jump action.");
12506                         }
12507                         dev_flow->dv.actions[actions_n++] =
12508                                         dev_flow->dv.jump->action;
12509                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12510                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12511                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12512                         num_of_dest++;
12513                         break;
12514                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12515                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12516                         if (flow_dv_convert_action_modify_mac
12517                                         (mhdr_res, actions, error))
12518                                 return -rte_errno;
12519                         action_flags |= actions->type ==
12520                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12521                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12522                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12523                         break;
12524                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12525                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12526                         if (flow_dv_convert_action_modify_ipv4
12527                                         (mhdr_res, actions, error))
12528                                 return -rte_errno;
12529                         action_flags |= actions->type ==
12530                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12531                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12532                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12533                         break;
12534                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12535                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12536                         if (flow_dv_convert_action_modify_ipv6
12537                                         (mhdr_res, actions, error))
12538                                 return -rte_errno;
12539                         action_flags |= actions->type ==
12540                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12541                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12542                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12543                         break;
12544                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12545                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12546                         if (flow_dv_convert_action_modify_tp
12547                                         (mhdr_res, actions, items,
12548                                          &flow_attr, dev_flow, !!(action_flags &
12549                                          MLX5_FLOW_ACTION_DECAP), error))
12550                                 return -rte_errno;
12551                         action_flags |= actions->type ==
12552                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12553                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12554                                         MLX5_FLOW_ACTION_SET_TP_DST;
12555                         break;
12556                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12557                         if (flow_dv_convert_action_modify_dec_ttl
12558                                         (mhdr_res, items, &flow_attr, dev_flow,
12559                                          !!(action_flags &
12560                                          MLX5_FLOW_ACTION_DECAP), error))
12561                                 return -rte_errno;
12562                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12563                         break;
12564                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12565                         if (flow_dv_convert_action_modify_ttl
12566                                         (mhdr_res, actions, items, &flow_attr,
12567                                          dev_flow, !!(action_flags &
12568                                          MLX5_FLOW_ACTION_DECAP), error))
12569                                 return -rte_errno;
12570                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12571                         break;
12572                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12573                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12574                         if (flow_dv_convert_action_modify_tcp_seq
12575                                         (mhdr_res, actions, error))
12576                                 return -rte_errno;
12577                         action_flags |= actions->type ==
12578                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12579                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12580                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12581                         break;
12582
12583                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12584                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12585                         if (flow_dv_convert_action_modify_tcp_ack
12586                                         (mhdr_res, actions, error))
12587                                 return -rte_errno;
12588                         action_flags |= actions->type ==
12589                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
12590                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
12591                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
12592                         break;
12593                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
12594                         if (flow_dv_convert_action_set_reg
12595                                         (mhdr_res, actions, error))
12596                                 return -rte_errno;
12597                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12598                         break;
12599                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
12600                         if (flow_dv_convert_action_copy_mreg
12601                                         (dev, mhdr_res, actions, error))
12602                                 return -rte_errno;
12603                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12604                         break;
12605                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
12606                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
12607                         dev_flow->handle->fate_action =
12608                                         MLX5_FLOW_FATE_DEFAULT_MISS;
12609                         break;
12610                 case RTE_FLOW_ACTION_TYPE_METER:
12611                         if (!wks->fm)
12612                                 return rte_flow_error_set(error, rte_errno,
12613                                         RTE_FLOW_ERROR_TYPE_ACTION,
12614                                         NULL, "Failed to get meter in flow.");
12615                         /* Set the meter action. */
12616                         dev_flow->dv.actions[actions_n++] =
12617                                 wks->fm->meter_action;
12618                         action_flags |= MLX5_FLOW_ACTION_METER;
12619                         break;
12620                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
12621                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
12622                                                               actions, error))
12623                                 return -rte_errno;
12624                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
12625                         break;
12626                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
12627                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
12628                                                               actions, error))
12629                                 return -rte_errno;
12630                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
12631                         break;
12632                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
12633                         sample_act_pos = actions_n;
12634                         sample = (const struct rte_flow_action_sample *)
12635                                  action->conf;
12636                         actions_n++;
12637                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
12638                         /* Put the encap action into the group when used with port id. */
12639                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
12640                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
12641                                 sample_act->action_flags |=
12642                                                         MLX5_FLOW_ACTION_ENCAP;
12643                         break;
12644                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
12645                         if (flow_dv_convert_action_modify_field
12646                                         (dev, mhdr_res, actions, attr, error))
12647                                 return -rte_errno;
12648                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
12649                         break;
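                /*
                 * For conntrack, the DV action attached to the flow
                 * depends on the connection direction stored in the CT
                 * object: dr_action_orig for the original direction,
                 * dr_action_rply for the reply direction.
                 */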
12650                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12651                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12652                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
12653                         if (!ct)
12654                                 return rte_flow_error_set(error, EINVAL,
12655                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12656                                                 NULL,
12657                                                 "Failed to get CT object.");
12658                         if (mlx5_aso_ct_available(priv->sh, ct))
12659                                 return rte_flow_error_set(error, rte_errno,
12660                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12661                                                 NULL,
12662                                                 "CT is unavailable.");
12663                         if (ct->is_original)
12664                                 dev_flow->dv.actions[actions_n] =
12665                                                         ct->dr_action_orig;
12666                         else
12667                                 dev_flow->dv.actions[actions_n] =
12668                                                         ct->dr_action_rply;
12669                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
12670                         flow->ct = owner_idx;
12671                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
12672                         actions_n++;
12673                         action_flags |= MLX5_FLOW_ACTION_CT;
12674                         break;
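                /*
                 * END is where the deferred work is resolved: the
                 * accumulated modify-header commands are registered, and
                 * AGE is implemented through a plain counter when both
                 * AGE and COUNT are non-shared or when the ASO flow-hit
                 * object cannot be used; otherwise an ASO age action is
                 * taken.
                 */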
12675                 case RTE_FLOW_ACTION_TYPE_END:
12676                         actions_end = true;
12677                         if (mhdr_res->actions_num) {
12678                                 /* Create the modify header action if needed. */
12679                                 if (flow_dv_modify_hdr_resource_register
12680                                         (dev, mhdr_res, dev_flow, error))
12681                                         return -rte_errno;
12682                                 dev_flow->dv.actions[modify_action_position] =
12683                                         handle->dvh.modify_hdr->action;
12684                         }
12685                         /*
12686                          * Handle the AGE and COUNT actions by a single HW
12687                          * counter when they are not shared.
12688                          */
12689                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
12690                                 if ((non_shared_age &&
12691                                      count && !count->shared) ||
12692                                     !(priv->sh->flow_hit_aso_en &&
12693                                       (attr->group || attr->transfer))) {
12694                                         /* Create the AGE action using a counter. */
12695                                         cnt_act = flow_dv_prepare_counter
12696                                                                 (dev, dev_flow,
12697                                                                  flow, count,
12698                                                                  non_shared_age,
12699                                                                  error);
12700                                         if (!cnt_act)
12701                                                 return -rte_errno;
12702                                         dev_flow->dv.actions[age_act_pos] =
12703                                                                 cnt_act->action;
12704                                         break;
12705                                 }
12706                                 if (!flow->age && non_shared_age) {
12707                                         flow->age = flow_dv_aso_age_alloc
12708                                                                 (dev, error);
12709                                         if (!flow->age)
12710                                                 return -rte_errno;
12711                                         flow_dv_aso_age_params_init
12712                                                     (dev, flow->age,
12713                                                      non_shared_age->context ?
12714                                                      non_shared_age->context :
12715                                                      (void *)(uintptr_t)
12716                                                      (dev_flow->flow_idx),
12717                                                      non_shared_age->timeout);
12718                                 }
12719                                 age_act = flow_aso_age_get_by_idx(dev,
12720                                                                   flow->age);
12721                                 dev_flow->dv.actions[age_act_pos] =
12722                                                              age_act->dr_action;
12723                         }
12724                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
12725                                 /*
12726                                  * Create one count action, to be used
12727                                  * by all sub-flows.
12728                                  */
12729                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
12730                                                                   flow, count,
12731                                                                   NULL, error);
12732                                 if (!cnt_act)
12733                                         return -rte_errno;
12734                                 dev_flow->dv.actions[actions_n++] =
12735                                                                 cnt_act->action;
12736                         }
12737                 default:
12738                         break;
12739                 }
12740                 if (mhdr_res->actions_num &&
12741                     modify_action_position == UINT32_MAX)
12742                         modify_action_position = actions_n++;
12743         }
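        /*
         * The second loop translates the pattern items into the matcher
         * mask and value buffers. matcher.priority is raised as deeper
         * protocol layers are matched, and next_protocol tracks the L3
         * next-header field so that tunnel items can be disambiguated.
         */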
12744         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
12745                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
12746                 int item_type = items->type;
12747
12748                 if (!mlx5_flow_os_item_supported(item_type))
12749                         return rte_flow_error_set(error, ENOTSUP,
12750                                                   RTE_FLOW_ERROR_TYPE_ITEM,
12751                                                   NULL, "item not supported");
12752                 switch (item_type) {
12753                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
12754                         flow_dv_translate_item_port_id
12755                                 (dev, match_mask, match_value, items, attr);
12756                         last_item = MLX5_FLOW_ITEM_PORT_ID;
12757                         break;
12758                 case RTE_FLOW_ITEM_TYPE_ETH:
12759                         flow_dv_translate_item_eth(match_mask, match_value,
12760                                                    items, tunnel,
12761                                                    dev_flow->dv.group);
12762                         matcher.priority = action_flags &
12763                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
12764                                         !dev_flow->external ?
12765                                         MLX5_PRIORITY_MAP_L3 :
12766                                         MLX5_PRIORITY_MAP_L2;
12767                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
12768                                              MLX5_FLOW_LAYER_OUTER_L2;
12769                         break;
12770                 case RTE_FLOW_ITEM_TYPE_VLAN:
12771                         flow_dv_translate_item_vlan(dev_flow,
12772                                                     match_mask, match_value,
12773                                                     items, tunnel,
12774                                                     dev_flow->dv.group);
12775                         matcher.priority = MLX5_PRIORITY_MAP_L2;
12776                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
12777                                               MLX5_FLOW_LAYER_INNER_VLAN) :
12778                                              (MLX5_FLOW_LAYER_OUTER_L2 |
12779                                               MLX5_FLOW_LAYER_OUTER_VLAN);
12780                         break;
12781                 case RTE_FLOW_ITEM_TYPE_IPV4:
12782                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12783                                                   &item_flags, &tunnel);
12784                         flow_dv_translate_item_ipv4(match_mask, match_value,
12785                                                     items, tunnel,
12786                                                     dev_flow->dv.group);
12787                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12788                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
12789                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
12790                         if (items->mask != NULL &&
12791                             ((const struct rte_flow_item_ipv4 *)
12792                              items->mask)->hdr.next_proto_id) {
12793                                 next_protocol =
12794                                         ((const struct rte_flow_item_ipv4 *)
12795                                          (items->spec))->hdr.next_proto_id;
12796                                 next_protocol &=
12797                                         ((const struct rte_flow_item_ipv4 *)
12798                                          (items->mask))->hdr.next_proto_id;
12799                         } else {
12800                                 /* Reset for inner layer. */
12801                                 next_protocol = 0xff;
12802                         }
12803                         break;
12804                 case RTE_FLOW_ITEM_TYPE_IPV6:
12805                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12806                                                   &item_flags, &tunnel);
12807                         flow_dv_translate_item_ipv6(match_mask, match_value,
12808                                                     items, tunnel,
12809                                                     dev_flow->dv.group);
12810                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12811                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
12812                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
12813                         if (items->mask != NULL &&
12814                             ((const struct rte_flow_item_ipv6 *)
12815                              items->mask)->hdr.proto) {
12816                                 next_protocol =
12817                                         ((const struct rte_flow_item_ipv6 *)
12818                                          items->spec)->hdr.proto;
12819                                 next_protocol &=
12820                                         ((const struct rte_flow_item_ipv6 *)
12821                                          items->mask)->hdr.proto;
12822                         } else {
12823                                 /* Reset for inner layer. */
12824                                 next_protocol = 0xff;
12825                         }
12826                         break;
12827                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
12828                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
12829                                                              match_value,
12830                                                              items, tunnel);
12831                         last_item = tunnel ?
12832                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
12833                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
12834                         if (items->mask != NULL &&
12835                             ((const struct rte_flow_item_ipv6_frag_ext *)
12836                              items->mask)->hdr.next_header) {
12837                                 next_protocol =
12838                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12839                                  items->spec)->hdr.next_header;
12840                                 next_protocol &=
12841                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12842                                  items->mask)->hdr.next_header;
12843                         } else {
12844                                 /* Reset for inner layer. */
12845                                 next_protocol = 0xff;
12846                         }
12847                         break;
12848                 case RTE_FLOW_ITEM_TYPE_TCP:
12849                         flow_dv_translate_item_tcp(match_mask, match_value,
12850                                                    items, tunnel);
12851                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12852                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
12853                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
12854                         break;
12855                 case RTE_FLOW_ITEM_TYPE_UDP:
12856                         flow_dv_translate_item_udp(match_mask, match_value,
12857                                                    items, tunnel);
12858                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12859                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
12860                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
12861                         break;
12862                 case RTE_FLOW_ITEM_TYPE_GRE:
12863                         flow_dv_translate_item_gre(match_mask, match_value,
12864                                                    items, tunnel);
12865                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12866                         last_item = MLX5_FLOW_LAYER_GRE;
12867                         break;
12868                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
12869                         flow_dv_translate_item_gre_key(match_mask,
12870                                                        match_value, items);
12871                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
12872                         break;
12873                 case RTE_FLOW_ITEM_TYPE_NVGRE:
12874                         flow_dv_translate_item_nvgre(match_mask, match_value,
12875                                                      items, tunnel);
12876                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12877                         last_item = MLX5_FLOW_LAYER_GRE;
12878                         break;
12879                 case RTE_FLOW_ITEM_TYPE_VXLAN:
12880                         flow_dv_translate_item_vxlan(match_mask, match_value,
12881                                                      items, tunnel);
12882                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12883                         last_item = MLX5_FLOW_LAYER_VXLAN;
12884                         break;
12885                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
12886                         flow_dv_translate_item_vxlan_gpe(match_mask,
12887                                                          match_value, items,
12888                                                          tunnel);
12889                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12890                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
12891                         break;
12892                 case RTE_FLOW_ITEM_TYPE_GENEVE:
12893                         flow_dv_translate_item_geneve(match_mask, match_value,
12894                                                       items, tunnel);
12895                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12896                         last_item = MLX5_FLOW_LAYER_GENEVE;
12897                         break;
12898                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
12899                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
12900                                                           match_value,
12901                                                           items, error);
12902                         if (ret)
12903                                 return rte_flow_error_set(error, -ret,
12904                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12905                                         "cannot create GENEVE TLV option");
12906                         flow->geneve_tlv_option = 1;
12907                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
12908                         break;
12909                 case RTE_FLOW_ITEM_TYPE_MPLS:
12910                         flow_dv_translate_item_mpls(match_mask, match_value,
12911                                                     items, last_item, tunnel);
12912                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12913                         last_item = MLX5_FLOW_LAYER_MPLS;
12914                         break;
12915                 case RTE_FLOW_ITEM_TYPE_MARK:
12916                         flow_dv_translate_item_mark(dev, match_mask,
12917                                                     match_value, items);
12918                         last_item = MLX5_FLOW_ITEM_MARK;
12919                         break;
12920                 case RTE_FLOW_ITEM_TYPE_META:
12921                         flow_dv_translate_item_meta(dev, match_mask,
12922                                                     match_value, attr, items);
12923                         last_item = MLX5_FLOW_ITEM_METADATA;
12924                         break;
12925                 case RTE_FLOW_ITEM_TYPE_ICMP:
12926                         flow_dv_translate_item_icmp(match_mask, match_value,
12927                                                     items, tunnel);
12928                         last_item = MLX5_FLOW_LAYER_ICMP;
12929                         break;
12930                 case RTE_FLOW_ITEM_TYPE_ICMP6:
12931                         flow_dv_translate_item_icmp6(match_mask, match_value,
12932                                                       items, tunnel);
12933                         last_item = MLX5_FLOW_LAYER_ICMP6;
12934                         break;
12935                 case RTE_FLOW_ITEM_TYPE_TAG:
12936                         flow_dv_translate_item_tag(dev, match_mask,
12937                                                    match_value, items);
12938                         last_item = MLX5_FLOW_ITEM_TAG;
12939                         break;
12940                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
12941                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
12942                                                         match_value, items);
12943                         last_item = MLX5_FLOW_ITEM_TAG;
12944                         break;
12945                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
12946                         flow_dv_translate_item_tx_queue(dev, match_mask,
12947                                                         match_value,
12948                                                         items);
12949                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
12950                         break;
12951                 case RTE_FLOW_ITEM_TYPE_GTP:
12952                         flow_dv_translate_item_gtp(match_mask, match_value,
12953                                                    items, tunnel);
12954                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12955                         last_item = MLX5_FLOW_LAYER_GTP;
12956                         break;
12957                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
12958                         ret = flow_dv_translate_item_gtp_psc(match_mask,
12959                                                           match_value,
12960                                                           items);
12961                         if (ret)
12962                                 return rte_flow_error_set(error, -ret,
12963                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12964                                         "cannot create GTP PSC item");
12965                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
12966                         break;
12967                 case RTE_FLOW_ITEM_TYPE_ECPRI:
12968                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
12969                                 /* Create it only the first time it is used. */
12970                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
12971                                 if (ret)
12972                                         return rte_flow_error_set
12973                                                 (error, -ret,
12974                                                 RTE_FLOW_ERROR_TYPE_ITEM,
12975                                                 NULL,
12976                                                 "cannot create eCPRI parser");
12977                         }
12978                         /* Adjust the matcher mask and device flow value sizes. */
12979                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
12980                         dev_flow->dv.value.size =
12981                                         MLX5_ST_SZ_BYTES(fte_match_param);
12982                         flow_dv_translate_item_ecpri(dev, match_mask,
12983                                                      match_value, items);
12984                         /* No other protocol should follow the eCPRI layer. */
12985                         last_item = MLX5_FLOW_LAYER_ECPRI;
12986                         break;
12987                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
12988                         flow_dv_translate_item_integrity(match_mask,
12989                                                          match_value,
12990                                                          head_item, items);
12991                         break;
12992                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
12993                         flow_dv_translate_item_aso_ct(dev, match_mask,
12994                                                       match_value, items);
12995                         break;
12996                 default:
12997                         break;
12998                 }
12999                 item_flags |= last_item;
13000         }
13001         /*
13002          * When E-Switch mode is enabled, there are two cases where the
13003          * source port must be set manually.
13004          * The first is a NIC steering rule; the second is an E-Switch
13005          * rule where no port_id item was found. In both cases the
13006          * source port is set according to the current port in use.
13007          */
13008         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13009             (priv->representor || priv->master)) {
13010                 if (flow_dv_translate_item_port_id(dev, match_mask,
13011                                                    match_value, NULL, attr))
13012                         return -rte_errno;
13013         }
13014 #ifdef RTE_LIBRTE_MLX5_DEBUG
13015         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13016                                               dev_flow->dv.value.buf));
13017 #endif
13018         /*
13019          * Layers may already be initialized from the prefix flow if this
13020          * dev_flow is the suffix flow.
13021          */
13022         handle->layers |= item_flags;
13023         if (action_flags & MLX5_FLOW_ACTION_RSS)
13024                 flow_dv_hashfields_set(dev_flow, rss_desc);
13025         /* If the sample action includes an RSS action, the Sample/Mirror
13026          * resource should be registered after the hash fields are updated.
13027          */
13028         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13029                 ret = flow_dv_translate_action_sample(dev,
13030                                                       sample,
13031                                                       dev_flow, attr,
13032                                                       &num_of_dest,
13033                                                       sample_actions,
13034                                                       &sample_res,
13035                                                       error);
13036                 if (ret < 0)
13037                         return ret;
13038                 ret = flow_dv_create_action_sample(dev,
13039                                                    dev_flow,
13040                                                    num_of_dest,
13041                                                    &sample_res,
13042                                                    &mdest_res,
13043                                                    sample_actions,
13044                                                    action_flags,
13045                                                    error);
13046                 if (ret < 0)
13047                         return rte_flow_error_set
13048                                                 (error, rte_errno,
13049                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13050                                                 NULL,
13051                                                 "cannot create sample action");
13052                 if (num_of_dest > 1) {
13053                         dev_flow->dv.actions[sample_act_pos] =
13054                         dev_flow->dv.dest_array_res->action;
13055                 } else {
13056                         dev_flow->dv.actions[sample_act_pos] =
13057                         dev_flow->dv.sample_res->verbs_action;
13058                 }
13059         }
13060         /*
13061          * For multiple destinations (sample action with ratio=1), the encap
13062          * action and port id action will be combined into a group action,
13063          * so these original actions must be removed from the flow and the
13064          * sample action used instead.
13065          */
13066         if (num_of_dest > 1 &&
13067             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13068                 int i;
13069                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13070
13071                 for (i = 0; i < actions_n; i++) {
13072                         if ((sample_act->dr_encap_action &&
13073                                 sample_act->dr_encap_action ==
13074                                 dev_flow->dv.actions[i]) ||
13075                                 (sample_act->dr_port_id_action &&
13076                                 sample_act->dr_port_id_action ==
13077                                 dev_flow->dv.actions[i]) ||
13078                                 (sample_act->dr_jump_action &&
13079                                 sample_act->dr_jump_action ==
13080                                 dev_flow->dv.actions[i]))
13081                                 continue;
13082                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13083                 }
13084                 memcpy((void *)dev_flow->dv.actions,
13085                                 (void *)temp_actions,
13086                                 tmp_actions_n * sizeof(void *));
13087                 actions_n = tmp_actions_n;
13088         }
13089         dev_flow->dv.actions_n = actions_n;
13090         dev_flow->act_flags = action_flags;
13091         if (wks->skip_matcher_reg)
13092                 return 0;
13093         /* Register matcher. */
13094         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13095                                     matcher.mask.size);
13096         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13097                                         matcher.priority);
13098         /*
13099          * When creating a meter drop flow in the drop table using the
13100          * original 5-tuple match, the matcher priority should be lower
13101          * than that of the mtr_id matcher.
13102          */
13103         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13104             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13105             matcher.priority <= MLX5_REG_BITS)
13106                 matcher.priority += MLX5_REG_BITS;
13107         /* The reserved field does not need to be set to 0 here. */
13108         tbl_key.is_fdb = attr->transfer;
13109         tbl_key.is_egress = attr->egress;
13110         tbl_key.level = dev_flow->dv.group;
13111         tbl_key.id = dev_flow->dv.table_id;
13112         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13113                                      tunnel, attr->group, error))
13114                 return -rte_errno;
13115         return 0;
13116 }
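
/*
 * Illustrative sketch (hypothetical, not driver code): the matcher
 * registered above is keyed per table, and the key encodes direction and
 * level. Assuming the union mlx5_flow_tbl_key type declared in mlx5_flow.h,
 * a transfer (FDB) rule in flow group 3 would be keyed as follows.
 */
static inline void
example_tbl_key_build(void)
{
        union mlx5_flow_tbl_key tbl_key = { 0 };

        tbl_key.is_fdb = 1;     /* attr->transfer */
        tbl_key.is_egress = 0;  /* attr->egress */
        tbl_key.level = 3;      /* dev_flow->dv.group */
        tbl_key.id = 0;         /* dev_flow->dv.table_id */
        (void)tbl_key;
}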
13117
13118 /**
13119  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13120  * within a shared RSS action.
13121  *
13122  * @param[in, out] action
13123  *   Shared RSS action holding hash RX queue objects.
13124  * @param[in] hash_fields
13125  *   Defines combination of packet fields to participate in RX hash.
13126  *   The IBV_RX_HASH_INNER (tunnel) bit is masked out for slot
13127  *   selection.
13128  * @param[in] hrxq_idx
13129  *   Hash RX queue index to set.
13130  *
13131  * @return
13132  *   0 on success, otherwise negative errno value.
13133  */
13134 static int
13135 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13136                               const uint64_t hash_fields,
13137                               uint32_t hrxq_idx)
13138 {
13139         uint32_t *hrxqs = action->hrxq;
13140
13141         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13142         case MLX5_RSS_HASH_IPV4:
13143                 /* fall-through. */
13144         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13145                 /* fall-through. */
13146         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13147                 hrxqs[0] = hrxq_idx;
13148                 return 0;
13149         case MLX5_RSS_HASH_IPV4_TCP:
13150                 /* fall-through. */
13151         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13152                 /* fall-through. */
13153         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13154                 hrxqs[1] = hrxq_idx;
13155                 return 0;
13156         case MLX5_RSS_HASH_IPV4_UDP:
13157                 /* fall-through. */
13158         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13159                 /* fall-through. */
13160         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13161                 hrxqs[2] = hrxq_idx;
13162                 return 0;
13163         case MLX5_RSS_HASH_IPV6:
13164                 /* fall-through. */
13165         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13166                 /* fall-through. */
13167         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13168                 hrxqs[3] = hrxq_idx;
13169                 return 0;
13170         case MLX5_RSS_HASH_IPV6_TCP:
13171                 /* fall-through. */
13172         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13173                 /* fall-through. */
13174         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13175                 hrxqs[4] = hrxq_idx;
13176                 return 0;
13177         case MLX5_RSS_HASH_IPV6_UDP:
13178                 /* fall-through. */
13179         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13180                 /* fall-through. */
13181         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13182                 hrxqs[5] = hrxq_idx;
13183                 return 0;
13184         case MLX5_RSS_HASH_NONE:
13185                 hrxqs[6] = hrxq_idx;
13186                 return 0;
13187         default:
13188                 return -1;
13189         }
13190 }
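
/*
 * Usage sketch (hypothetical helper, not driver code): store an hrxq index
 * into the IPv4+TCP slot of a shared RSS action. The SRC_ONLY and DST_ONLY
 * variants land in the same slot, as the switch above shows.
 */
static inline int
example_rss_hrxq_set_ipv4_tcp(struct mlx5_shared_action_rss *action,
                              uint32_t hrxq_idx)
{
        return __flow_dv_action_rss_hrxq_set(action, MLX5_RSS_HASH_IPV4_TCP,
                                             hrxq_idx);
}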
13191
13192 /**
13193  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13194  * within a shared RSS action.
13195  *
13196  * @param[in] dev
13197  *   Pointer to the Ethernet device structure.
13198  * @param[in] idx
13199  *   Shared RSS action ID holding hash RX queue objects.
13200  * @param[in] hash_fields
13201  *   Defines combination of packet fields to participate in RX hash.
13202  *   The IBV_RX_HASH_INNER (tunnel) bit is masked out for slot
13203  *   selection.
13204  *
13205  * @return
13206  *   Valid hash RX queue index, otherwise 0.
13207  */
13208 static uint32_t
13209 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13210                                  const uint64_t hash_fields)
13211 {
13212         struct mlx5_priv *priv = dev->data->dev_private;
13213         struct mlx5_shared_action_rss *shared_rss =
13214             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13215         const uint32_t *hrxqs = shared_rss->hrxq;
13216
13217         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13218         case MLX5_RSS_HASH_IPV4:
13219                 /* fall-through. */
13220         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13221                 /* fall-through. */
13222         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13223                 return hrxqs[0];
13224         case MLX5_RSS_HASH_IPV4_TCP:
13225                 /* fall-through. */
13226         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13227                 /* fall-through. */
13228         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13229                 return hrxqs[1];
13230         case MLX5_RSS_HASH_IPV4_UDP:
13231                 /* fall-through. */
13232         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13233                 /* fall-through. */
13234         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13235                 return hrxqs[2];
13236         case MLX5_RSS_HASH_IPV6:
13237                 /* fall-through. */
13238         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13239                 /* fall-through. */
13240         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13241                 return hrxqs[3];
13242         case MLX5_RSS_HASH_IPV6_TCP:
13243                 /* fall-through. */
13244         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13245                 /* fall-through. */
13246         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13247                 return hrxqs[4];
13248         case MLX5_RSS_HASH_IPV6_UDP:
13249                 /* fall-through. */
13250         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13251                 /* fall-through. */
13252         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13253                 return hrxqs[5];
13254         case MLX5_RSS_HASH_NONE:
13255                 return hrxqs[6];
13256         default:
13257                 return 0;
13258         }
13259
13260 }
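
/*
 * Usage sketch (hypothetical, not driver code): resolve the hrxq stored
 * for IPv4+TCP traffic. The IBV_RX_HASH_INNER bit is masked out
 * internally, so tunnel and non-tunnel lookups hit the same slot.
 */
static inline uint32_t
example_rss_hrxq_lookup_ipv4_tcp(struct rte_eth_dev *dev, uint32_t srss_idx)
{
        return __flow_dv_action_rss_hrxq_lookup(dev, srss_idx,
                                                MLX5_RSS_HASH_IPV4_TCP |
                                                IBV_RX_HASH_INNER);
}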
13261
13262 /**
13263  * Apply the flow to the NIC, lock free
13264  * (the mutex should be acquired by the caller).
13265  *
13266  * @param[in] dev
13267  *   Pointer to the Ethernet device structure.
13268  * @param[in, out] flow
13269  *   Pointer to flow structure.
13270  * @param[out] error
13271  *   Pointer to error structure.
13272  *
13273  * @return
13274  *   0 on success, a negative errno value otherwise and rte_errno is set.
13275  */
13276 static int
13277 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13278               struct rte_flow_error *error)
13279 {
13280         struct mlx5_flow_dv_workspace *dv;
13281         struct mlx5_flow_handle *dh;
13282         struct mlx5_flow_handle_dv *dv_h;
13283         struct mlx5_flow *dev_flow;
13284         struct mlx5_priv *priv = dev->data->dev_private;
13285         uint32_t handle_idx;
13286         int n;
13287         int err;
13288         int idx;
13289         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13290         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13291
13292         MLX5_ASSERT(wks);
13293         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13294                 dev_flow = &wks->flows[idx];
13295                 dv = &dev_flow->dv;
13296                 dh = dev_flow->handle;
13297                 dv_h = &dh->dvh;
13298                 n = dv->actions_n;
13299                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13300                         if (dv->transfer) {
13301                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13302                                 dv->actions[n++] = priv->sh->dr_drop_action;
13303                         } else {
13304 #ifdef HAVE_MLX5DV_DR
13305                                 /* DR supports drop action placeholder. */
13306                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13307                                 dv->actions[n++] = priv->sh->dr_drop_action;
13308 #else
13309                                 /* For DV we use the explicit drop queue. */
13310                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13311                                 dv->actions[n++] =
13312                                                 priv->drop_queue.hrxq->action;
13313 #endif
13314                         }
13315                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13316                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13317                         struct mlx5_hrxq *hrxq;
13318                         uint32_t hrxq_idx;
13319
13320                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13321                                                     &hrxq_idx);
13322                         if (!hrxq) {
13323                                 rte_flow_error_set
13324                                         (error, rte_errno,
13325                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13326                                          "cannot get hash queue");
13327                                 goto error;
13328                         }
13329                         dh->rix_hrxq = hrxq_idx;
13330                         dv->actions[n++] = hrxq->action;
13331                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13332                         struct mlx5_hrxq *hrxq = NULL;
13333                         uint32_t hrxq_idx;
13334
13335                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13336                                                 rss_desc->shared_rss,
13337                                                 dev_flow->hash_fields);
13338                         if (hrxq_idx)
13339                                 hrxq = mlx5_ipool_get
13340                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13341                                          hrxq_idx);
13342                         if (!hrxq) {
13343                                 rte_flow_error_set
13344                                         (error, rte_errno,
13345                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13346                                          "cannot get hash queue");
13347                                 goto error;
13348                         }
13349                         dh->rix_srss = rss_desc->shared_rss;
13350                         dv->actions[n++] = hrxq->action;
13351                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13352                         if (!priv->sh->default_miss_action) {
13353                                 rte_flow_error_set
13354                                         (error, rte_errno,
13355                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13356                                          "default miss action not created.");
13357                                 goto error;
13358                         }
13359                         dv->actions[n++] = priv->sh->default_miss_action;
13360                 }
13361                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13362                                                (void *)&dv->value, n,
13363                                                dv->actions, &dh->drv_flow);
13364                 if (err) {
13365                         rte_flow_error_set(error, errno,
13366                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13367                                            NULL,
13368                                            "hardware refuses to create flow");
13369                         goto error;
13370                 }
13371                 if (priv->vmwa_context &&
13372                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13373                         /*
13374                          * The rule contains the VLAN pattern.
13375                          * For VF we are going to create VLAN
13376                          * For a VF we are going to create a VLAN
13377                          * interface to make the hypervisor set the
13378                          * correct e-Switch vport context.
13379                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13380                 }
13381         }
13382         return 0;
13383 error:
13384         err = rte_errno; /* Save rte_errno before cleanup. */
13385         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13386                        handle_idx, dh, next) {
13387                 /* hrxq is a union; don't clear it if the flag is not set. */
13388                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13389                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13390                         dh->rix_hrxq = 0;
13391                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13392                         dh->rix_srss = 0;
13393                 }
13394                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13395                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13396         }
13397         rte_errno = err; /* Restore rte_errno. */
13398         return -rte_errno;
13399 }
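
/*
 * Hedged sketch (hypothetical caller, not driver code): flow_dv_apply()
 * assumes the caller serializes flow operations and that the thread
 * workspace already holds the translated dev_flows.
 */
static inline int
example_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
              struct rte_flow_error *error)
{
        /* Locking is assumed to be handled by the upper flow layer. */
        int ret = flow_dv_apply(dev, flow, error);

        if (ret)
                DRV_LOG(DEBUG, "flow apply failed, rte_errno: %d", rte_errno);
        return ret;
}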
13400
13401 void
13402 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
13403                           struct mlx5_cache_entry *entry)
13404 {
13405         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
13406                                                           entry);
13407
13408         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
13409         mlx5_free(cache);
13410 }
13411
13412 /**
13413  * Release the flow matcher.
13414  *
13415  * @param dev
13416  *   Pointer to Ethernet device.
13417  * @param handle
13418  *   Pointer to mlx5_flow_handle owning the matcher.
13419  *
13420  * @return
13421  *   1 while a reference on it exists, 0 when freed.
13422  */
13423 static int
13424 flow_dv_matcher_release(struct rte_eth_dev *dev,
13425                         struct mlx5_flow_handle *handle)
13426 {
13427         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13428         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13429                                                             typeof(*tbl), tbl);
13430         int ret;
13431
13432         MLX5_ASSERT(matcher->matcher_object);
13433         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
13434         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13435         return ret;
13436 }
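
/*
 * Sketch of the release contract (hypothetical caller, not driver code):
 * the *_release() helpers in this section return non-zero while other
 * references remain and 0 once the underlying object has been freed.
 */
static inline void
example_matcher_release(struct rte_eth_dev *dev,
                        struct mlx5_flow_handle *handle)
{
        if (flow_dv_matcher_release(dev, handle))
                DRV_LOG(DEBUG, "matcher still referenced");
}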
13437
13438 /**
13439  * Release encap_decap resource.
13440  *
13441  * @param list
13442  *   Pointer to the hash list.
13443  * @param entry
13444  *   Pointer to the existing resource entry object.
13445  */
13446 void
13447 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
13448                               struct mlx5_hlist_entry *entry)
13449 {
13450         struct mlx5_dev_ctx_shared *sh = list->ctx;
13451         struct mlx5_flow_dv_encap_decap_resource *res =
13452                 container_of(entry, typeof(*res), entry);
13453
13454         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13455         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13456 }
13457
13458 /**
13459  * Release an encap/decap resource.
13460  *
13461  * @param dev
13462  *   Pointer to Ethernet device.
13463  * @param encap_decap_idx
13464  *   Index of encap decap resource.
13465  *
13466  * @return
13467  *   1 while a reference on it exists, 0 when freed.
13468  */
13469 static int
13470 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13471                                      uint32_t encap_decap_idx)
13472 {
13473         struct mlx5_priv *priv = dev->data->dev_private;
13474         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
13475
13476         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13477                                         encap_decap_idx);
13478         if (!cache_resource)
13479                 return 0;
13480         MLX5_ASSERT(cache_resource->action);
13481         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
13482                                      &cache_resource->entry);
13483 }
13484
13485 /**
13486  * Release a jump table action resource.
13487  *
13488  * @param dev
13489  *   Pointer to Ethernet device.
13490  * @param rix_jump
13491  *   Index to the jump action resource.
13492  *
13493  * @return
13494  *   1 while a reference on it exists, 0 when freed.
13495  */
13496 static int
13497 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13498                                   uint32_t rix_jump)
13499 {
13500         struct mlx5_priv *priv = dev->data->dev_private;
13501         struct mlx5_flow_tbl_data_entry *tbl_data;
13502
13503         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13504                                   rix_jump);
13505         if (!tbl_data)
13506                 return 0;
13507         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13508 }
13509
13510 void
13511 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
13512                          struct mlx5_hlist_entry *entry)
13513 {
13514         struct mlx5_flow_dv_modify_hdr_resource *res =
13515                 container_of(entry, typeof(*res), entry);
13516
13517         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13518         mlx5_free(entry);
13519 }
13520
13521 /**
13522  * Release a modify-header resource.
13523  *
13524  * @param dev
13525  *   Pointer to Ethernet device.
13526  * @param handle
13527  *   Pointer to mlx5_flow_handle.
13528  *
13529  * @return
13530  *   1 while a reference on it exists, 0 when freed.
13531  */
13532 static int
13533 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13534                                     struct mlx5_flow_handle *handle)
13535 {
13536         struct mlx5_priv *priv = dev->data->dev_private;
13537         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13538
13539         MLX5_ASSERT(entry->action);
13540         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13541 }
13542
13543 void
13544 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
13545                           struct mlx5_cache_entry *entry)
13546 {
13547         struct mlx5_dev_ctx_shared *sh = list->ctx;
13548         struct mlx5_flow_dv_port_id_action_resource *cache =
13549                         container_of(entry, typeof(*cache), entry);
13550
13551         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13552         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
13553 }
13554
13555 /**
13556  * Release port ID action resource.
13557  *
13558  * @param dev
13559  *   Pointer to Ethernet device.
13560  * @param port_id
13561  *   Index to the port ID action resource.
13562  *
13563  * @return
13564  *   1 while a reference on it exists, 0 when freed.
13565  */
13566 static int
13567 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
13568                                         uint32_t port_id)
13569 {
13570         struct mlx5_priv *priv = dev->data->dev_private;
13571         struct mlx5_flow_dv_port_id_action_resource *cache;
13572
13573         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
13574         if (!cache)
13575                 return 0;
13576         MLX5_ASSERT(cache->action);
13577         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
13578                                      &cache->entry);
13579 }
13580
13581 /**
13582  * Release shared RSS action resource.
13583  *
13584  * @param dev
13585  *   Pointer to Ethernet device.
13586  * @param srss
13587  *   Shared RSS action index.
13588  */
13589 static void
13590 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
13591 {
13592         struct mlx5_priv *priv = dev->data->dev_private;
13593         struct mlx5_shared_action_rss *shared_rss;
13594
13595         shared_rss = mlx5_ipool_get
13596                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
13597         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13598 }
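
/*
 * Hedged note (illustrative helper, not driver code): the release above
 * only drops one reference with a relaxed atomic decrement; the object is
 * reclaimed elsewhere once the count reaches zero. A minimal equivalent:
 */
static inline uint32_t
example_refcnt_put(uint32_t *refcnt)
{
        return __atomic_sub_fetch(refcnt, 1, __ATOMIC_RELAXED);
}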
13599
13600 void
13601 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
13602                             struct mlx5_cache_entry *entry)
13603 {
13604         struct mlx5_dev_ctx_shared *sh = list->ctx;
13605         struct mlx5_flow_dv_push_vlan_action_resource *cache =
13606                         container_of(entry, typeof(*cache), entry);
13607
13608         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13609         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
13610 }
13611
13612 /**
13613  * Release push vlan action resource.
13614  *
13615  * @param dev
13616  *   Pointer to Ethernet device.
13617  * @param handle
13618  *   Pointer to mlx5_flow_handle.
13619  *
13620  * @return
13621  *   1 while a reference on it exists, 0 when freed.
13622  */
13623 static int
13624 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
13625                                           struct mlx5_flow_handle *handle)
13626 {
13627         struct mlx5_priv *priv = dev->data->dev_private;
13628         struct mlx5_flow_dv_push_vlan_action_resource *cache;
13629         uint32_t idx = handle->dvh.rix_push_vlan;
13630
13631         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
13632         if (!cache)
13633                 return 0;
13634         MLX5_ASSERT(cache->action);
13635         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
13636                                      &cache->entry);
13637 }
13638
13639 /**
13640  * Release the fate resource.
13641  *
13642  * @param dev
13643  *   Pointer to Ethernet device.
13644  * @param handle
13645  *   Pointer to mlx5_flow_handle.
13646  */
13647 static void
13648 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
13649                                struct mlx5_flow_handle *handle)
13650 {
13651         if (!handle->rix_fate)
13652                 return;
13653         switch (handle->fate_action) {
13654         case MLX5_FLOW_FATE_QUEUE:
13655                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
13656                         mlx5_hrxq_release(dev, handle->rix_hrxq);
13657                 break;
13658         case MLX5_FLOW_FATE_JUMP:
13659                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
13660                 break;
13661         case MLX5_FLOW_FATE_PORT_ID:
13662                 flow_dv_port_id_action_resource_release(dev,
13663                                 handle->rix_port_id_action);
13664                 break;
13665         default:
13666                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
13667                 break;
13668         }
13669         handle->rix_fate = 0;
13670 }
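
/*
 * Sketch (hypothetical helper, not driver code): rix_fate aliases the
 * per-fate resource indices in a union, so it must be interpreted through
 * fate_action before use, exactly as the switch above does.
 */
static inline uint32_t
example_fate_index(const struct mlx5_flow_handle *handle)
{
        /* Only meaningful when fate_action == MLX5_FLOW_FATE_JUMP. */
        return handle->fate_action == MLX5_FLOW_FATE_JUMP ?
               handle->rix_jump : 0;
}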
13671
13672 void
13673 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
13674                          struct mlx5_cache_entry *entry)
13675 {
13676         struct mlx5_flow_dv_sample_resource *cache_resource =
13677                         container_of(entry, typeof(*cache_resource), entry);
13678         struct rte_eth_dev *dev = cache_resource->dev;
13679         struct mlx5_priv *priv = dev->data->dev_private;
13680
13681         if (cache_resource->verbs_action)
13682                 claim_zero(mlx5_flow_os_destroy_flow_action
13683                                 (cache_resource->verbs_action));
13684         if (cache_resource->normal_path_tbl)
13685                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13686                         cache_resource->normal_path_tbl);
13687         flow_dv_sample_sub_actions_release(dev,
13688                                 &cache_resource->sample_idx);
13689         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13690                         cache_resource->idx);
13691         DRV_LOG(DEBUG, "sample resource %p: removed",
13692                 (void *)cache_resource);
13693 }
13694
13695 /**
13696  * Release a sample resource.
13697  *
13698  * @param dev
13699  *   Pointer to Ethernet device.
13700  * @param handle
13701  *   Pointer to mlx5_flow_handle.
13702  *
13703  * @return
13704  *   1 while a reference on it exists, 0 when freed.
13705  */
13706 static int
13707 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
13708                                      struct mlx5_flow_handle *handle)
13709 {
13710         struct mlx5_priv *priv = dev->data->dev_private;
13711         struct mlx5_flow_dv_sample_resource *cache_resource;
13712
13713         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13714                          handle->dvh.rix_sample);
13715         if (!cache_resource)
13716                 return 0;
13717         MLX5_ASSERT(cache_resource->verbs_action);
13718         return mlx5_cache_unregister(&priv->sh->sample_action_list,
13719                                      &cache_resource->entry);
13720 }
13721
13722 void
13723 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
13724                              struct mlx5_cache_entry *entry)
13725 {
13726         struct mlx5_flow_dv_dest_array_resource *cache_resource =
13727                         container_of(entry, typeof(*cache_resource), entry);
13728         struct rte_eth_dev *dev = cache_resource->dev;
13729         struct mlx5_priv *priv = dev->data->dev_private;
13730         uint32_t i = 0;
13731
13732         MLX5_ASSERT(cache_resource->action);
13733         if (cache_resource->action)
13734                 claim_zero(mlx5_flow_os_destroy_flow_action
13735                                         (cache_resource->action));
13736         for (; i < cache_resource->num_of_dest; i++)
13737                 flow_dv_sample_sub_actions_release(dev,
13738                                 &cache_resource->sample_idx[i]);
13739         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13740                         cache_resource->idx);
13741         DRV_LOG(DEBUG, "destination array resource %p: removed",
13742                 (void *)cache_resource);
13743 }
13744
13745 /**
13746  * Release a destination array resource.
13747  *
13748  * @param dev
13749  *   Pointer to Ethernet device.
13750  * @param handle
13751  *   Pointer to mlx5_flow_handle.
13752  *
13753  * @return
13754  *   1 while a reference on it exists, 0 when freed.
13755  */
13756 static int
13757 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
13758                                     struct mlx5_flow_handle *handle)
13759 {
13760         struct mlx5_priv *priv = dev->data->dev_private;
13761         struct mlx5_flow_dv_dest_array_resource *cache;
13762
13763         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13764                                handle->dvh.rix_dest_array);
13765         if (!cache)
13766                 return 0;
13767         MLX5_ASSERT(cache->action);
13768         return mlx5_cache_unregister(&priv->sh->dest_array_list,
13769                                      &cache->entry);
13770 }
13771
13772 static void
13773 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
13774 {
13775         struct mlx5_priv *priv = dev->data->dev_private;
13776         struct mlx5_dev_ctx_shared *sh = priv->sh;
13777         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
13778                                 sh->geneve_tlv_option_resource;
13779         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
13780         if (geneve_opt_resource) {
13781                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
13782                                          __ATOMIC_RELAXED))) {
13783                         claim_zero(mlx5_devx_cmd_destroy
13784                                         (geneve_opt_resource->obj));
13785                         mlx5_free(sh->geneve_tlv_option_resource);
13786                         sh->geneve_tlv_option_resource = NULL;
13787                 }
13788         }
13789         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
13790 }
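
/*
 * Hedged sketch of the generic pattern above (not driver code): the GENEVE
 * TLV option object is a per-device singleton guarded by a spinlock and a
 * refcount, destroyed only when the last reference is dropped.
 */
static inline void
example_singleton_put(rte_spinlock_t *sl, uint32_t *refcnt, void **obj)
{
        rte_spinlock_lock(sl);
        if (*obj && !__atomic_sub_fetch(refcnt, 1, __ATOMIC_RELAXED)) {
                mlx5_free(*obj);
                *obj = NULL;
        }
        rte_spinlock_unlock(sl);
}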
13791
13792 /**
13793  * Remove the flow from the NIC but keep it in memory.
13794  * Lock free (the mutex should be acquired by the caller).
13795  *
13796  * @param[in] dev
13797  *   Pointer to Ethernet device.
13798  * @param[in, out] flow
13799  *   Pointer to flow structure.
13800  */
13801 static void
13802 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
13803 {
13804         struct mlx5_flow_handle *dh;
13805         uint32_t handle_idx;
13806         struct mlx5_priv *priv = dev->data->dev_private;
13807
13808         if (!flow)
13809                 return;
13810         handle_idx = flow->dev_handles;
13811         while (handle_idx) {
13812                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
13813                                     handle_idx);
13814                 if (!dh)
13815                         return;
13816                 if (dh->drv_flow) {
13817                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
13818                         dh->drv_flow = NULL;
13819                 }
13820                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
13821                         flow_dv_fate_resource_release(dev, dh);
13822                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13823                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13824                 handle_idx = dh->next.next;
13825         }
13826 }
13827
13828 /**
13829  * Remove the flow from the NIC and the memory.
13830  * Lock free (the mutex should be acquired by the caller).
13831  *
13832  * @param[in] dev
13833  *   Pointer to the Ethernet device structure.
13834  * @param[in, out] flow
13835  *   Pointer to flow structure.
13836  */
13837 static void
13838 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
13839 {
13840         struct mlx5_flow_handle *dev_handle;
13841         struct mlx5_priv *priv = dev->data->dev_private;
13842         struct mlx5_flow_meter_info *fm = NULL;
13843         uint32_t srss = 0;
13844
13845         if (!flow)
13846                 return;
13847         flow_dv_remove(dev, flow);
13848         if (flow->counter) {
13849                 flow_dv_counter_free(dev, flow->counter);
13850                 flow->counter = 0;
13851         }
13852         if (flow->meter) {
13853                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
13854                 if (fm)
13855                         mlx5_flow_meter_detach(priv, fm);
13856                 flow->meter = 0;
13857         }
13858         /* Keep the current age handling by default. */
13859         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
13860                 flow_dv_aso_ct_release(dev, flow->ct);
13861         else if (flow->age)
13862                 flow_dv_aso_age_release(dev, flow->age);
13863         if (flow->geneve_tlv_option) {
13864                 flow_dv_geneve_tlv_option_resource_release(dev);
13865                 flow->geneve_tlv_option = 0;
13866         }
13867         while (flow->dev_handles) {
13868                 uint32_t tmp_idx = flow->dev_handles;
13869
13870                 dev_handle = mlx5_ipool_get(priv->sh->ipool
13871                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
13872                 if (!dev_handle)
13873                         return;
13874                 flow->dev_handles = dev_handle->next.next;
13875                 if (dev_handle->dvh.matcher)
13876                         flow_dv_matcher_release(dev, dev_handle);
13877                 if (dev_handle->dvh.rix_sample)
13878                         flow_dv_sample_resource_release(dev, dev_handle);
13879                 if (dev_handle->dvh.rix_dest_array)
13880                         flow_dv_dest_array_resource_release(dev, dev_handle);
13881                 if (dev_handle->dvh.rix_encap_decap)
13882                         flow_dv_encap_decap_resource_release(dev,
13883                                 dev_handle->dvh.rix_encap_decap);
13884                 if (dev_handle->dvh.modify_hdr)
13885                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
13886                 if (dev_handle->dvh.rix_push_vlan)
13887                         flow_dv_push_vlan_action_resource_release(dev,
13888                                                                   dev_handle);
13889                 if (dev_handle->dvh.rix_tag)
13890                         flow_dv_tag_release(dev,
13891                                             dev_handle->dvh.rix_tag);
13892                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
13893                         flow_dv_fate_resource_release(dev, dev_handle);
13894                 else if (!srss)
13895                         srss = dev_handle->rix_srss;
13896                 if (fm && dev_handle->is_meter_flow_id &&
13897                     dev_handle->split_flow_id)
13898                         mlx5_ipool_free(fm->flow_ipool,
13899                                         dev_handle->split_flow_id);
13900                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
13901                            tmp_idx);
13902         }
13903         if (srss)
13904                 flow_dv_shared_rss_action_release(dev, srss);
13905 }
13906
13907 /**
13908  * Release array of hash RX queue objects.
13909  * Helper function.
13910  *
13911  * @param[in] dev
13912  *   Pointer to the Ethernet device structure.
13913  * @param[in, out] hrxqs
13914  *   Array of hash RX queue objects.
13915  *
13916  * @return
13917  *   Total number of references to hash RX queue objects in *hrxqs* array
13918  *   after this operation.
13919  */
13920 static int
13921 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
13922                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
13923 {
13924         size_t i;
13925         int remaining = 0;
13926
13927         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
13928                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
13929
13930                 if (!ret)
13931                         (*hrxqs)[i] = 0;
13932                 remaining += ret;
13933         }
13934         return remaining;
13935 }
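
/*
 * Usage sketch (hypothetical helper, not driver code): a zero return from
 * the helper above means every hrxq slot of the array was freed; a
 * non-zero value counts references still held elsewhere.
 */
static inline int
example_hrxqs_all_released(struct rte_eth_dev *dev,
                           uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
{
        return __flow_dv_hrxqs_release(dev, hrxqs) == 0;
}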
13936
13937 /**
13938  * Release all hash RX queue objects representing shared RSS action.
13939  *
13940  * @param[in] dev
13941  *   Pointer to the Ethernet device structure.
13942  * @param[in, out] action
13943  *   Shared RSS action to remove hash RX queue objects from.
13944  *
13945  * @return
13946  *   Total number of references to hash RX queue objects stored in *action*
13947  *   after this operation.
13948  *   Expected to be 0 if no external references are held.
13949  */
13950 static int
13951 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
13952                                  struct mlx5_shared_action_rss *shared_rss)
13953 {
13954         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
13955 }
13956
13957 /**
13958  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
13959  * user input.
13960  *
13961  * Only one hash value is available for each L3+L4 combination.
13962  * For example,
13963  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
13964  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
13965  * the same slot in mlx5_rss_hash_fields.
13966  *
13967  * @param[in] rss
13968  *   Pointer to the shared action RSS conf.
13969  * @param[in, out] hash_field
13970  *   hash_field variable to be adjusted.
13971  *
13972  * @return
13973  *   void
13974  */
13975 static void
13976 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
13977                                      uint64_t *hash_field)
13978 {
13979         uint64_t rss_types = rss->origin.types;
13980
13981         switch (*hash_field & ~IBV_RX_HASH_INNER) {
13982         case MLX5_RSS_HASH_IPV4:
13983                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
13984                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
13985                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13986                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
13987                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13988                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
13989                         else
13990                                 *hash_field |= MLX5_RSS_HASH_IPV4;
13991                 }
13992                 return;
13993         case MLX5_RSS_HASH_IPV6:
13994                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
13995                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
13996                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13997                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
13998                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13999                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14000                         else
14001                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14002                 }
14003                 return;
14004         case MLX5_RSS_HASH_IPV4_UDP:
14005                 /* fall-through. */
14006         case MLX5_RSS_HASH_IPV6_UDP:
14007                 if (rss_types & ETH_RSS_UDP) {
14008                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14009                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14010                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14011                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14012                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14013                         else
14014                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14015                 }
14016                 return;
14017         case MLX5_RSS_HASH_IPV4_TCP:
14018                 /* fall-through. */
14019         case MLX5_RSS_HASH_IPV6_TCP:
14020                 if (rss_types & ETH_RSS_TCP) {
14021                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14022                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14023                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14024                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14025                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14026                         else
14027                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14028                 }
14029                 return;
14030         default:
14031                 return;
14032         }
14033 }
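
/*
 * Worked example (hedged, not driver code): with origin.types containing
 * ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY, the adjustment narrows a generic
 * IPv4 hash down to the source address only.
 */
static inline uint64_t
example_adjust_ipv4_src_only(struct mlx5_shared_action_rss *rss)
{
        uint64_t hash_field = MLX5_RSS_HASH_IPV4;

        __flow_dv_action_rss_l34_hash_adjust(rss, &hash_field);
        return hash_field; /* IBV_RX_HASH_SRC_IPV4 for the types above. */
}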
14034
14035 /**
14036  * Setup shared RSS action.
14037  * Prepare set of hash RX queue objects sufficient to handle all valid
14038  * hash_fields combinations (see enum ibv_rx_hash_fields).
14039  *
14040  * @param[in] dev
14041  *   Pointer to the Ethernet device structure.
14042  * @param[in] action_idx
14043  *   Shared RSS action ipool index.
14044  * @param[in, out] shared_rss
14045  *   Partially initialized shared RSS action.
14046  * @param[out] error
14047  *   Perform verbose error reporting if not NULL. Initialized in case of
14048  *   error only.
14049  *
14050  * @return
14051  *   0 on success, otherwise negative errno value.
14052  */
14053 static int
14054 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14055                            uint32_t action_idx,
14056                            struct mlx5_shared_action_rss *shared_rss,
14057                            struct rte_flow_error *error)
14058 {
14059         struct mlx5_flow_rss_desc rss_desc = { 0 };
14060         size_t i;
14061         int err;
14062
14063         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14064                 return rte_flow_error_set(error, rte_errno,
14065                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14066                                           "cannot setup indirection table");
14067         }
14068         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14069         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14070         rss_desc.const_q = shared_rss->origin.queue;
14071         rss_desc.queue_num = shared_rss->origin.queue_num;
14072         /* Set non-zero value to indicate a shared RSS. */
14073         rss_desc.shared_rss = action_idx;
14074         rss_desc.ind_tbl = shared_rss->ind_tbl;
14075         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14076                 uint32_t hrxq_idx;
14077                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14078                 int tunnel = 0;
14079
14080                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14081                 if (shared_rss->origin.level > 1) {
14082                         hash_fields |= IBV_RX_HASH_INNER;
14083                         tunnel = 1;
14084                 }
14085                 rss_desc.tunnel = tunnel;
14086                 rss_desc.hash_fields = hash_fields;
14087                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14088                 if (!hrxq_idx) {
14089                         rte_flow_error_set
14090                                 (error, rte_errno,
14091                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14092                                  "cannot get hash queue");
14093                         goto error_hrxq_new;
14094                 }
14095                 err = __flow_dv_action_rss_hrxq_set
14096                         (shared_rss, hash_fields, hrxq_idx);
14097                 MLX5_ASSERT(!err);
14098         }
14099         return 0;
14100 error_hrxq_new:
14101         err = rte_errno;
14102         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14103         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14104                 shared_rss->ind_tbl = NULL;
14105         rte_errno = err;
14106         return -rte_errno;
14107 }
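
/*
 * Editorial note: mlx5_rss_hash_fields[] holds the fixed set of L3/L4 hash
 * combinations (IPv4/IPv6 alone and together with TCP/UDP ports), so the
 * loop above materializes one hash RX queue object per combination up
 * front; any hash_fields value a flow requests later resolves without
 * creating objects on the datapath.
 */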
14108
14109 /**
14110  * Create shared RSS action.
14111  *
14112  * @param[in] dev
14113  *   Pointer to the Ethernet device structure.
14114  * @param[in] conf
14115  *   Shared action configuration.
14116  * @param[in] rss
14117  *   RSS action specification used to create shared action.
14118  * @param[out] error
14119  *   Perform verbose error reporting if not NULL. Initialized in case of
14120  *   error only.
14121  *
14122  * @return
14123  *   A valid shared action ID in case of success, 0 otherwise and
14124  *   rte_errno is set.
14125  */
14126 static uint32_t
14127 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14128                             const struct rte_flow_indir_action_conf *conf,
14129                             const struct rte_flow_action_rss *rss,
14130                             struct rte_flow_error *error)
14131 {
14132         struct mlx5_priv *priv = dev->data->dev_private;
14133         struct mlx5_shared_action_rss *shared_rss = NULL;
14134         void *queue = NULL;
14135         struct rte_flow_action_rss *origin;
14136         const uint8_t *rss_key;
14137         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14138         uint32_t idx;
14139
14140         RTE_SET_USED(conf);
14141         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14142                             0, SOCKET_ID_ANY);
14143         shared_rss = mlx5_ipool_zmalloc
14144                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14145         if (!shared_rss || !queue) {
14146                 rte_flow_error_set(error, ENOMEM,
14147                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14148                                    "cannot allocate resource memory");
14149                 goto error_rss_init;
14150         }
14151         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14152                 rte_flow_error_set(error, E2BIG,
14153                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14154                                    "rss action number out of range");
14155                 goto error_rss_init;
14156         }
14157         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14158                                           sizeof(*shared_rss->ind_tbl),
14159                                           0, SOCKET_ID_ANY);
14160         if (!shared_rss->ind_tbl) {
14161                 rte_flow_error_set(error, ENOMEM,
14162                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14163                                    "cannot allocate resource memory");
14164                 goto error_rss_init;
14165         }
14166         memcpy(queue, rss->queue, queue_size);
14167         shared_rss->ind_tbl->queues = queue;
14168         shared_rss->ind_tbl->queues_n = rss->queue_num;
14169         origin = &shared_rss->origin;
14170         origin->func = rss->func;
14171         origin->level = rss->level;
14172         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14173         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14174         /* NULL RSS key indicates default RSS key. */
14175         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14176         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14177         origin->key = &shared_rss->key[0];
14178         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14179         origin->queue = queue;
14180         origin->queue_num = rss->queue_num;
14181         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14182                 goto error_rss_init;
14183         rte_spinlock_init(&shared_rss->action_rss_sl);
14184         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14185         rte_spinlock_lock(&priv->shared_act_sl);
14186         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14187                      &priv->rss_shared_actions, idx, shared_rss, next);
14188         rte_spinlock_unlock(&priv->shared_act_sl);
14189         return idx;
14190 error_rss_init:
14191         if (shared_rss) {
14192                 if (shared_rss->ind_tbl)
14193                         mlx5_free(shared_rss->ind_tbl);
14194                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14195                                 idx);
14196         }
14197         if (queue)
14198                 mlx5_free(queue);
14199         return 0;
14200 }
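
/*
 * Hedged usage sketch (editorial, not compiled as part of this file): an
 * application reaches __flow_dv_action_rss_create() through the generic
 * indirect action API. The example_* helper name, the queue list and the
 * TOEPLITZ selection are made-up example values.
 */
static struct rte_flow_action_handle *
example_create_shared_rss(uint16_t port_id)
{
        static const uint16_t queues[] = { 0, 1, 2, 3 };
        struct rte_flow_action_rss rss = {
                .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
                .level = 1,
                .types = ETH_RSS_IP,    /* 0 would also select ETH_RSS_IP. */
                .key = NULL,            /* NULL selects rss_hash_default_key. */
                .key_len = 0,
                .queue = queues,
                .queue_num = RTE_DIM(queues),
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };
        const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
        struct rte_flow_error error;

        /* Returns an encoded handle on success, NULL with rte_errno set. */
        return rte_flow_action_handle_create(port_id, &conf, &action, &error);
}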
14201
14202 /**
14203  * Destroy the shared RSS action.
14204  * Release related hash RX queue objects.
14205  *
14206  * @param[in] dev
14207  *   Pointer to the Ethernet device structure.
14208  * @param[in] idx
14209  *   The shared RSS action object ID to be removed.
14210  * @param[out] error
14211  *   Perform verbose error reporting if not NULL. Initialized in case of
14212  *   error only.
14213  *
14214  * @return
14215  *   0 on success, otherwise negative errno value.
14216  */
14217 static int
14218 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14219                              struct rte_flow_error *error)
14220 {
14221         struct mlx5_priv *priv = dev->data->dev_private;
14222         struct mlx5_shared_action_rss *shared_rss =
14223             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14224         uint32_t old_refcnt = 1;
14225         int remaining;
14226         uint16_t *queue = NULL;
14227
14228         if (!shared_rss)
14229                 return rte_flow_error_set(error, EINVAL,
14230                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14231                                           "invalid shared action");
14232         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14233         if (remaining)
14234                 return rte_flow_error_set(error, EBUSY,
14235                                           RTE_FLOW_ERROR_TYPE_ACTION,
14236                                           NULL,
14237                                           "shared rss hrxq has references");
14238         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14239                                          0, 0, __ATOMIC_ACQUIRE,
14240                                          __ATOMIC_RELAXED))
14241                 return rte_flow_error_set(error, EBUSY,
14242                                           RTE_FLOW_ERROR_TYPE_ACTION,
14243                                           NULL,
14244                                           "shared rss has references");
14245         queue = shared_rss->ind_tbl->queues;
14246         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14247         if (remaining)
14248                 return rte_flow_error_set(error, EBUSY,
14249                                           RTE_FLOW_ERROR_TYPE_ACTION,
14250                                           NULL,
14251                                           "shared rss indirection table has"
14252                                           " references");
14253         mlx5_free(queue);
14254         rte_spinlock_lock(&priv->shared_act_sl);
14255         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14256                      &priv->rss_shared_actions, idx, shared_rss, next);
14257         rte_spinlock_unlock(&priv->shared_act_sl);
14258         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14259                         idx);
14260         return 0;
14261 }
14262
14263 /**
14264  * Create indirect action, lock free,
14265  * (mutex should be acquired by caller).
14266  * Dispatcher for action type specific call.
14267  *
14268  * @param[in] dev
14269  *   Pointer to the Ethernet device structure.
14270  * @param[in] conf
14271  *   Shared action configuration.
14272  * @param[in] action
14273  *   Action specification used to create indirect action.
14274  * @param[out] error
14275  *   Perform verbose error reporting if not NULL. Initialized in case of
14276  *   error only.
14277  *
14278  * @return
14279  *   A valid shared action handle in case of success, NULL otherwise and
14280  *   rte_errno is set.
14281  */
14282 static struct rte_flow_action_handle *
14283 flow_dv_action_create(struct rte_eth_dev *dev,
14284                       const struct rte_flow_indir_action_conf *conf,
14285                       const struct rte_flow_action *action,
14286                       struct rte_flow_error *err)
14287 {
14288         struct mlx5_priv *priv = dev->data->dev_private;
14289         uint32_t age_idx = 0;
14290         uint32_t idx = 0;
14291         uint32_t ret = 0;
14292
14293         switch (action->type) {
14294         case RTE_FLOW_ACTION_TYPE_RSS:
14295                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14296                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14297                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14298                 break;
14299         case RTE_FLOW_ACTION_TYPE_AGE:
14300                 age_idx = flow_dv_aso_age_alloc(dev, err);
14301                 if (!age_idx) {
14302                         ret = -rte_errno;
14303                         break;
14304                 }
14305                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14306                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14307                 flow_dv_aso_age_params_init(dev, age_idx,
14308                                         ((const struct rte_flow_action_age *)
14309                                                 action->conf)->context ?
14310                                         ((const struct rte_flow_action_age *)
14311                                                 action->conf)->context :
14312                                         (void *)(uintptr_t)idx,
14313                                         ((const struct rte_flow_action_age *)
14314                                                 action->conf)->timeout);
14315                 ret = age_idx;
14316                 break;
14317         case RTE_FLOW_ACTION_TYPE_COUNT:
14318                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14319                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14320                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14321                 break;
14322         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14323                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14324                                                          err);
14325                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14326                 break;
14327         default:
14328                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14329                                    NULL, "action type not supported");
14330                 break;
14331         }
14332         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14333 }
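
/*
 * Editorial note: the handle returned above is not a pointer to memory, it
 * is an encoded 32-bit value, e.g. for RSS (mirroring the code above):
 *
 *   idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
 *          MLX5_INDIRECT_ACTION_TYPE_OFFSET) | rss_ipool_index;
 *
 * flow_dv_action_destroy() and flow_dv_action_update() below recover the
 * type and the object index with the matching shift and mask.
 */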
14334
14335 /**
14336  * Destroy the indirect action.
14337  * Release action related resources on the NIC and the memory.
14338  * Lock free, (mutex should be acquired by caller).
14339  * Dispatcher for action type specific call.
14340  *
14341  * @param[in] dev
14342  *   Pointer to the Ethernet device structure.
14343  * @param[in] handle
14344  *   The indirect action object handle to be removed.
14345  * @param[out] error
14346  *   Perform verbose error reporting if not NULL. Initialized in case of
14347  *   error only.
14348  *
14349  * @return
14350  *   0 on success, otherwise negative errno value.
14351  */
14352 static int
14353 flow_dv_action_destroy(struct rte_eth_dev *dev,
14354                        struct rte_flow_action_handle *handle,
14355                        struct rte_flow_error *error)
14356 {
14357         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14358         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14359         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14360         struct mlx5_flow_counter *cnt;
14361         uint32_t no_flow_refcnt = 1;
14362         int ret;
14363
14364         switch (type) {
14365         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14366                 return __flow_dv_action_rss_release(dev, idx, error);
14367         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14368                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14369                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14370                                                  &no_flow_refcnt, 1, false,
14371                                                  __ATOMIC_ACQUIRE,
14372                                                  __ATOMIC_RELAXED))
14373                         return rte_flow_error_set(error, EBUSY,
14374                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14375                                                   NULL,
14376                                                   "Indirect count action has references");
14377                 flow_dv_counter_free(dev, idx);
14378                 return 0;
14379         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14380                 ret = flow_dv_aso_age_release(dev, idx);
14381                 if (ret)
14382                         /*
14383                          * In this case, the last flow holding the
14384                          * reference will actually release the age action.
14385                          */
14386                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14387                                 " released with references %d.", idx, ret);
14388                 return 0;
14389         case MLX5_INDIRECT_ACTION_TYPE_CT:
14390                 ret = flow_dv_aso_ct_release(dev, idx);
14391                 if (ret < 0)
14392                         return ret;
14393                 if (ret > 0)
14394                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14395                                 "has references %d.", idx, ret);
14396                 return 0;
14397         default:
14398                 return rte_flow_error_set(error, ENOTSUP,
14399                                           RTE_FLOW_ERROR_TYPE_ACTION,
14400                                           NULL,
14401                                           "action type not supported");
14402         }
14403 }
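
/*
 * Hedged usage sketch (editorial, not compiled as part of this file):
 * destruction is driven through the generic API; the example_* helper is
 * hypothetical, port_id and handle are placeholders supplied by a caller.
 */
static int
example_destroy_indirect_action(uint16_t port_id,
                                struct rte_flow_action_handle *handle)
{
        struct rte_flow_error error;

        /* 0 on success; EBUSY-style errors mean the action is still used. */
        return rte_flow_action_handle_destroy(port_id, handle, &error);
}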
14404
14405 /**
14406  * Update the shared RSS action configuration in place.
14407  *
14408  * @param[in] dev
14409  *   Pointer to the Ethernet device structure.
14410  * @param[in] idx
14411  *   The shared RSS action object ID to be updated.
14412  * @param[in] action_conf
14413  *   RSS action specification used to modify *shared_rss*.
14414  * @param[out] error
14415  *   Perform verbose error reporting if not NULL. Initialized in case of
14416  *   error only.
14417  *
14418  * @return
14419  *   0 on success, otherwise negative errno value.
14420  * @note Currently only the RSS queue set can be updated.
14421  */
14422 static int
14423 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14424                             const struct rte_flow_action_rss *action_conf,
14425                             struct rte_flow_error *error)
14426 {
14427         struct mlx5_priv *priv = dev->data->dev_private;
14428         struct mlx5_shared_action_rss *shared_rss =
14429             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14430         int ret = 0;
14431         void *queue = NULL;
14432         uint16_t *queue_old = NULL;
14433         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
14434
14435         if (!shared_rss)
14436                 return rte_flow_error_set(error, EINVAL,
14437                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14438                                           "invalid shared action to update");
14439         if (priv->obj_ops.ind_table_modify == NULL)
14440                 return rte_flow_error_set(error, ENOTSUP,
14441                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14442                                           "cannot modify indirection table");
14443         queue = mlx5_malloc(MLX5_MEM_ZERO,
14444                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14445                             0, SOCKET_ID_ANY);
14446         if (!queue)
14447                 return rte_flow_error_set(error, ENOMEM,
14448                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14449                                           NULL,
14450                                           "cannot allocate resource memory");
14451         memcpy(queue, action_conf->queue, queue_size);
14452         MLX5_ASSERT(shared_rss->ind_tbl);
14453         rte_spinlock_lock(&shared_rss->action_rss_sl);
14454         queue_old = shared_rss->ind_tbl->queues;
14455         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
14456                                         queue, action_conf->queue_num, true);
14457         if (ret) {
14458                 mlx5_free(queue);
14459                 ret = rte_flow_error_set(error, rte_errno,
14460                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14461                                           "cannot update indirection table");
14462         } else {
14463                 mlx5_free(queue_old);
14464                 shared_rss->origin.queue = queue;
14465                 shared_rss->origin.queue_num = action_conf->queue_num;
14466         }
14467         rte_spinlock_unlock(&shared_rss->action_rss_sl);
14468         return ret;
14469 }
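
/*
 * Hedged usage sketch (editorial, not compiled as part of this file): for
 * RSS, flow_dv_action_update() below expects *update* to be a struct
 * rte_flow_action wrapping the new configuration, and only the queue set
 * is honored (see the note above). The example_* helper is hypothetical
 * and the queue numbers are example values.
 */
static int
example_update_shared_rss_queues(uint16_t port_id,
                                 struct rte_flow_action_handle *handle)
{
        static const uint16_t new_queues[] = { 4, 5, 6, 7 };
        struct rte_flow_action_rss rss = {
                .queue = new_queues,
                .queue_num = RTE_DIM(new_queues),
        };
        const struct rte_flow_action update = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };
        struct rte_flow_error error;

        return rte_flow_action_handle_update(port_id, handle, &update, &error);
}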
14470
14471 /**
14472  * Update the conntrack context or direction in place.
14473  * Context update should be synchronized.
14474  *
14475  * @param[in] dev
14476  *   Pointer to the Ethernet device structure.
14477  * @param[in] idx
14478  *   The conntrack object ID to be updated.
14479  * @param[in] update
14480  *   Pointer to the structure of information to update.
14481  * @param[out] error
14482  *   Perform verbose error reporting if not NULL. Initialized in case of
14483  *   error only.
14484  *
14485  * @return
14486  *   0 on success, otherwise negative errno value.
14487  */
14488 static int
14489 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14490                            const struct rte_flow_modify_conntrack *update,
14491                            struct rte_flow_error *error)
14492 {
14493         struct mlx5_priv *priv = dev->data->dev_private;
14494         struct mlx5_aso_ct_action *ct;
14495         const struct rte_flow_action_conntrack *new_prf;
14496         int ret = 0;
14497         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14498         uint32_t dev_idx;
14499
14500         if (PORT_ID(priv) != owner)
14501                 return rte_flow_error_set(error, EACCES,
14502                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14503                                           NULL,
14504                                           "CT object owned by another port");
14505         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14506         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14507         if (!ct->refcnt)
14508                 return rte_flow_error_set(error, ENOMEM,
14509                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14510                                           NULL,
14511                                           "CT object is inactive");
14512         new_prf = &update->new_ct;
14513         if (update->direction)
14514                 ct->is_original = !!new_prf->is_original_dir;
14515         if (update->state) {
14516                 /* Only validate the profile when it needs to be updated. */
14517                 ret = mlx5_validate_action_ct(dev, new_prf, error);
14518                 if (ret)
14519                         return ret;
14520                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14521                 if (ret)
14522                         return rte_flow_error_set(error, EIO,
14523                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14524                                         NULL,
14525                                         "Failed to send CT context update WQE");
14526                 /* Block until ready or a failure. */
14527                 ret = mlx5_aso_ct_available(priv->sh, ct);
14528                 if (ret)
14529                         rte_flow_error_set(error, rte_errno,
14530                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14531                                            NULL,
14532                                            "Timed out waiting for the CT update");
14533         }
14534         return ret;
14535 }
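
/*
 * Hedged usage sketch (editorial, not compiled as part of this file):
 * updating only the direction of a conntrack object while keeping its
 * offloaded state; the example_* helper is hypothetical and the field
 * values are illustrative.
 */
static int
example_update_ct_direction(uint16_t port_id,
                            struct rte_flow_action_handle *handle)
{
        const struct rte_flow_modify_conntrack modify = {
                .new_ct = { .is_original_dir = 0 },
                .direction = 1, /* Apply the direction change. */
                .state = 0,     /* Skip the state validation and WQE. */
        };
        struct rte_flow_error error;

        return rte_flow_action_handle_update(port_id, handle, &modify, &error);
}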
14536
14537 /**
14538  * Update the shared action configuration in place, lock free,
14539  * (mutex should be acquired by caller).
14540  *
14541  * @param[in] dev
14542  *   Pointer to the Ethernet device structure.
14543  * @param[in] handle
14544  *   The indirect action object handle to be updated.
14545  * @param[in] update
14546  *   Action specification used to modify the action pointed by *handle*.
14547  *   *update* could be of the same type as the action pointed to by the
14548  *   *handle* argument, or some other structure like a wrapper, depending on
14549  *   the indirect action type.
14550  * @param[out] error
14551  *   Perform verbose error reporting if not NULL. Initialized in case of
14552  *   error only.
14553  *
14554  * @return
14555  *   0 on success, otherwise negative errno value.
14556  */
14557 static int
14558 flow_dv_action_update(struct rte_eth_dev *dev,
14559                         struct rte_flow_action_handle *handle,
14560                         const void *update,
14561                         struct rte_flow_error *err)
14562 {
14563         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14564         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14565         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14566         const void *action_conf;
14567
14568         switch (type) {
14569         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14570                 action_conf = ((const struct rte_flow_action *)update)->conf;
14571                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14572         case MLX5_INDIRECT_ACTION_TYPE_CT:
14573                 return __flow_dv_action_ct_update(dev, idx, update, err);
14574         default:
14575                 return rte_flow_error_set(err, ENOTSUP,
14576                                           RTE_FLOW_ERROR_TYPE_ACTION,
14577                                           NULL,
14578                                           "action type update not supported");
14579         }
14580 }
14581
14582 /**
14583  * Destroy the meter sub policy table rules.
14584  * Lock free, (mutex should be acquired by caller).
14585  *
14586  * @param[in] dev
14587  *   Pointer to Ethernet device.
14588  * @param[in] sub_policy
14589  *   Pointer to meter sub policy table.
14590  */
14591 static void
14592 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
14593                              struct mlx5_flow_meter_sub_policy *sub_policy)
14594 {
14595         struct mlx5_priv *priv = dev->data->dev_private;
14596         struct mlx5_flow_tbl_data_entry *tbl;
14597         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
14598         struct mlx5_flow_meter_info *next_fm;
14599         struct mlx5_sub_policy_color_rule *color_rule;
14600         void *tmp;
14601         uint32_t i;
14602
14603         for (i = 0; i < RTE_COLORS; i++) {
14604                 next_fm = NULL;
14605                 if (i == RTE_COLOR_GREEN && policy &&
14606                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
14607                         next_fm = mlx5_flow_meter_find(priv,
14608                                         policy->act_cnt[i].next_mtr_id, NULL);
14609                 TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
14610                                    next_port, tmp) {
14611                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
14612                         tbl = container_of(color_rule->matcher->tbl,
14613                                         typeof(*tbl), tbl);
14614                         mlx5_cache_unregister(&tbl->matchers,
14615                                                 &color_rule->matcher->entry);
14616                         TAILQ_REMOVE(&sub_policy->color_rules[i],
14617                                         color_rule, next_port);
14618                         mlx5_free(color_rule);
14619                         if (next_fm)
14620                                 mlx5_flow_meter_detach(priv, next_fm);
14621                 }
14622         }
14623         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14624                 if (sub_policy->rix_hrxq[i]) {
14625                         if (policy && !policy->is_hierarchy)
14626                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
14627                         sub_policy->rix_hrxq[i] = 0;
14628                 }
14629                 if (sub_policy->jump_tbl[i]) {
14630                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14631                         sub_policy->jump_tbl[i]);
14632                         sub_policy->jump_tbl[i] = NULL;
14633                 }
14634         }
14635         if (sub_policy->tbl_rsc) {
14636                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14637                         sub_policy->tbl_rsc);
14638                 sub_policy->tbl_rsc = NULL;
14639         }
14640 }
14641
14642 /**
14643  * Destroy policy rules, lock free,
14644  * (mutex should be acquired by caller).
14645  * Iterates over all domains and destroys their sub-policy table rules.
14646  *
14647  * @param[in] dev
14648  *   Pointer to the Ethernet device structure.
14649  * @param[in] mtr_policy
14650  *   Meter policy struct.
14651  */
14652 static void
14653 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
14654                       struct mlx5_flow_meter_policy *mtr_policy)
14655 {
14656         uint32_t i, j;
14657         struct mlx5_flow_meter_sub_policy *sub_policy;
14658         uint16_t sub_policy_num;
14659
14660         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14661                 sub_policy_num = (mtr_policy->sub_policy_num >>
14662                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14663                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14664                 for (j = 0; j < sub_policy_num; j++) {
14665                         sub_policy = mtr_policy->sub_policys[i][j];
14666                         if (sub_policy)
14667                                 __flow_dv_destroy_sub_policy_rules
14668                                                 (dev, sub_policy);
14669                 }
14670         }
14671 }
14672
14673 /**
14674  * Destroy policy action, lock free,
14675  * (mutex should be acquired by caller).
14676  * Dispatcher for action type specific call.
14677  *
14678  * @param[in] dev
14679  *   Pointer to the Ethernet device structure.
14680  * @param[in] mtr_policy
14681  *   Meter policy struct.
14682  */
14683 static void
14684 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
14685                       struct mlx5_flow_meter_policy *mtr_policy)
14686 {
14687         struct rte_flow_action *rss_action;
14688         struct mlx5_flow_handle dev_handle;
14689         uint32_t i, j;
14690
14691         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14692                 if (mtr_policy->act_cnt[i].rix_mark) {
14693                         flow_dv_tag_release(dev,
14694                                 mtr_policy->act_cnt[i].rix_mark);
14695                         mtr_policy->act_cnt[i].rix_mark = 0;
14696                 }
14697                 if (mtr_policy->act_cnt[i].modify_hdr) {
14698                         dev_handle.dvh.modify_hdr =
14699                                 mtr_policy->act_cnt[i].modify_hdr;
14700                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
14701                 }
14702                 switch (mtr_policy->act_cnt[i].fate_action) {
14703                 case MLX5_FLOW_FATE_SHARED_RSS:
14704                         rss_action = mtr_policy->act_cnt[i].rss;
14705                         mlx5_free(rss_action);
14706                         break;
14707                 case MLX5_FLOW_FATE_PORT_ID:
14708                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
14709                                 flow_dv_port_id_action_resource_release(dev,
14710                                 mtr_policy->act_cnt[i].rix_port_id_action);
14711                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
14712                         }
14713                         break;
14714                 case MLX5_FLOW_FATE_DROP:
14715                 case MLX5_FLOW_FATE_JUMP:
14716                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14717                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
14718                                                 NULL;
14719                         break;
14720                 default:
14721                         /* Queue action: nothing to destroy. */
14722                         break;
14723                 }
14724         }
14725         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14726                 mtr_policy->dr_drop_action[j] = NULL;
14727 }
14728
14729 /**
14730  * Create policy action per domain, lock free,
14731  * (mutex should be acquired by caller).
14732  * Dispatcher for action type specific call.
14733  *
14734  * @param[in] dev
14735  *   Pointer to the Ethernet device structure.
14736  * @param[in] mtr_policy
14737  *   Meter policy struct.
14738  * @param[in] actions
14739  *   Per-color action specifications used to create the meter actions.
14740  * @param[in] domain
14741  *   Meter domain to create the policy actions for.
14742  * @param[out] error
14743  *   Perform verbose error reporting if not NULL. Initialized on error only.
14744  * @return
14745  *   0 on success, otherwise negative errno value.
14746  */
14747 static int
14748 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
14749                         struct mlx5_flow_meter_policy *mtr_policy,
14750                         const struct rte_flow_action *actions[RTE_COLORS],
14751                         enum mlx5_meter_domain domain,
14752                         struct rte_mtr_error *error)
14753 {
14754         struct mlx5_priv *priv = dev->data->dev_private;
14755         struct rte_flow_error flow_err;
14756         const struct rte_flow_action *act;
14757         uint64_t action_flags = 0;
14758         struct mlx5_flow_handle dh;
14759         struct mlx5_flow dev_flow;
14760         struct mlx5_flow_dv_port_id_action_resource port_id_action;
14761         int i, ret;
14762         uint8_t egress, transfer;
14763         struct mlx5_meter_policy_action_container *act_cnt = NULL;
14764         union {
14765                 struct mlx5_flow_dv_modify_hdr_resource res;
14766                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
14767                             sizeof(struct mlx5_modification_cmd) *
14768                             (MLX5_MAX_MODIFY_NUM + 1)];
14769         } mhdr_dummy;
14770         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
14771
14772         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14773         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14774         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
14775         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
14776         memset(&port_id_action, 0,
14777                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
14778         memset(mhdr_res, 0, sizeof(*mhdr_res));
14779         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
14780                                         egress ?
14781                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
14782                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
14783         dev_flow.handle = &dh;
14784         dev_flow.dv.port_id_action = &port_id_action;
14785         dev_flow.external = true;
14786         for (i = 0; i < RTE_COLORS; i++) {
14787                 if (i < MLX5_MTR_RTE_COLORS)
14788                         act_cnt = &mtr_policy->act_cnt[i];
14789                 for (act = actions[i];
14790                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
14791                         act++) {
14792                         switch (act->type) {
14793                         case RTE_FLOW_ACTION_TYPE_MARK:
14794                         {
14795                                 uint32_t tag_be = mlx5_flow_mark_set
14796                                         (((const struct rte_flow_action_mark *)
14797                                         (act->conf))->id);
14798
14799                                 if (i >= MLX5_MTR_RTE_COLORS)
14800                                         return -rte_mtr_error_set(error,
14801                                           ENOTSUP,
14802                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14803                                           NULL,
14804                                           "cannot create policy "
14805                                           "mark action for this color");
14806                                 dev_flow.handle->mark = 1;
14807                                 if (flow_dv_tag_resource_register(dev, tag_be,
14808                                                   &dev_flow, &flow_err))
14809                                         return -rte_mtr_error_set(error,
14810                                         ENOTSUP,
14811                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14812                                         NULL,
14813                                         "cannot setup policy mark action");
14814                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
14815                                 act_cnt->rix_mark =
14816                                         dev_flow.handle->dvh.rix_tag;
14817                                 action_flags |= MLX5_FLOW_ACTION_MARK;
14818                                 break;
14819                         }
14820                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
14821                                 if (i >= MLX5_MTR_RTE_COLORS)
14822                                         return -rte_mtr_error_set(error,
14823                                           ENOTSUP,
14824                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14825                                           NULL,
14826                                           "cannot create policy "
14827                                           "set tag action for this color");
14828                                 if (flow_dv_convert_action_set_tag
14829                                 (dev, mhdr_res,
14830                                 (const struct rte_flow_action_set_tag *)
14831                                 act->conf,  &flow_err))
14832                                         return -rte_mtr_error_set(error,
14833                                         ENOTSUP,
14834                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14835                                         NULL, "cannot convert policy "
14836                                         "set tag action");
14837                                 if (!mhdr_res->actions_num)
14838                                         return -rte_mtr_error_set(error,
14839                                         ENOTSUP,
14840                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14841                                         NULL, "cannot find policy "
14842                                         "set tag action");
14843                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
14844                                 break;
14845                         case RTE_FLOW_ACTION_TYPE_DROP:
14846                         {
14847                                 struct mlx5_flow_mtr_mng *mtrmng =
14848                                                 priv->sh->mtrmng;
14849                                 struct mlx5_flow_tbl_data_entry *tbl_data;
14850
14851                                 /*
14852                                  * Create the drop table with
14853                                  * METER DROP level.
14854                                  */
14855                                 if (!mtrmng->drop_tbl[domain]) {
14856                                         mtrmng->drop_tbl[domain] =
14857                                         flow_dv_tbl_resource_get(dev,
14858                                         MLX5_FLOW_TABLE_LEVEL_METER,
14859                                         egress, transfer, false, NULL, 0,
14860                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
14861                                         if (!mtrmng->drop_tbl[domain])
14862                                                 return -rte_mtr_error_set
14863                                         (error, ENOTSUP,
14864                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14865                                         NULL,
14866                                         "Failed to create meter drop table");
14867                                 }
14868                                 tbl_data = container_of
14869                                 (mtrmng->drop_tbl[domain],
14870                                 struct mlx5_flow_tbl_data_entry, tbl);
14871                                 if (i < MLX5_MTR_RTE_COLORS) {
14872                                         act_cnt->dr_jump_action[domain] =
14873                                                 tbl_data->jump.action;
14874                                         act_cnt->fate_action =
14875                                                 MLX5_FLOW_FATE_DROP;
14876                                 }
14877                                 if (i == RTE_COLOR_RED)
14878                                         mtr_policy->dr_drop_action[domain] =
14879                                                 tbl_data->jump.action;
14880                                 action_flags |= MLX5_FLOW_ACTION_DROP;
14881                                 break;
14882                         }
14883                         case RTE_FLOW_ACTION_TYPE_QUEUE:
14884                         {
14885                                 if (i >= MLX5_MTR_RTE_COLORS)
14886                                         return -rte_mtr_error_set(error,
14887                                         ENOTSUP,
14888                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14889                                         NULL, "cannot create policy "
14890                                         "fate queue for this color");
14891                                 act_cnt->queue =
14892                                 ((const struct rte_flow_action_queue *)
14893                                         (act->conf))->index;
14894                                 act_cnt->fate_action =
14895                                         MLX5_FLOW_FATE_QUEUE;
14896                                 dev_flow.handle->fate_action =
14897                                         MLX5_FLOW_FATE_QUEUE;
14898                                 mtr_policy->is_queue = 1;
14899                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
14900                                 break;
14901                         }
14902                         case RTE_FLOW_ACTION_TYPE_RSS:
14903                         {
14904                                 int rss_size;
14905
14906                                 if (i >= MLX5_MTR_RTE_COLORS)
14907                                         return -rte_mtr_error_set(error,
14908                                           ENOTSUP,
14909                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14910                                           NULL,
14911                                           "cannot create policy "
14912                                           "rss action for this color");
14913                                 /*
14914                                  * Save RSS conf into policy struct
14915                                  * for translate stage.
14916                                  */
14917                                 rss_size = (int)rte_flow_conv
14918                                         (RTE_FLOW_CONV_OP_ACTION,
14919                                         NULL, 0, act, &flow_err);
14920                                 if (rss_size <= 0)
14921                                         return -rte_mtr_error_set(error,
14922                                           ENOTSUP,
14923                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14924                                           NULL, "cannot get the "
14925                                           "RSS action struct size");
14926                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
14927                                                 rss_size, 0, SOCKET_ID_ANY);
14928                                 if (!act_cnt->rss)
14929                                         return -rte_mtr_error_set(error,
14930                                           ENOTSUP,
14931                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14932                                           NULL,
14933                                           "cannot allocate RSS action memory");
14934                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
14935                                         act_cnt->rss, rss_size,
14936                                         act, &flow_err);
14937                                 if (ret < 0)
14938                                         return -rte_mtr_error_set(error,
14939                                           ENOTSUP,
14940                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14941                                           NULL, "cannot save the RSS "
14942                                           "action into the policy struct");
14943                                 act_cnt->fate_action =
14944                                         MLX5_FLOW_FATE_SHARED_RSS;
14945                                 action_flags |= MLX5_FLOW_ACTION_RSS;
14946                                 break;
14947                         }
14948                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
14949                         {
14950                                 struct mlx5_flow_dv_port_id_action_resource
14951                                         port_id_resource;
14952                                 uint32_t port_id = 0;
14953
14954                                 if (i >= MLX5_MTR_RTE_COLORS)
14955                                         return -rte_mtr_error_set(error,
14956                                         ENOTSUP,
14957                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14958                                         NULL, "cannot create policy "
14959                                         "port action for this color");
14960                                 memset(&port_id_resource, 0,
14961                                         sizeof(port_id_resource));
14962                                 if (flow_dv_translate_action_port_id(dev, act,
14963                                                 &port_id, &flow_err))
14964                                         return -rte_mtr_error_set(error,
14965                                         ENOTSUP,
14966                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14967                                         NULL, "cannot translate "
14968                                         "policy port action");
14969                                 port_id_resource.port_id = port_id;
14970                                 if (flow_dv_port_id_action_resource_register
14971                                         (dev, &port_id_resource,
14972                                         &dev_flow, &flow_err))
14973                                         return -rte_mtr_error_set(error,
14974                                         ENOTSUP,
14975                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14976                                         NULL, "cannot setup "
14977                                         "policy port action");
14978                                 act_cnt->rix_port_id_action =
14979                                         dev_flow.handle->rix_port_id_action;
14980                                 act_cnt->fate_action =
14981                                         MLX5_FLOW_FATE_PORT_ID;
14982                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
14983                                 break;
14984                         }
14985                         case RTE_FLOW_ACTION_TYPE_JUMP:
14986                         {
14987                                 uint32_t jump_group = 0;
14988                                 uint32_t table = 0;
14989                                 struct mlx5_flow_tbl_data_entry *tbl_data;
14990                                 struct flow_grp_info grp_info = {
14991                                         .external = !!dev_flow.external,
14992                                         .transfer = !!transfer,
14993                                         .fdb_def_rule = !!priv->fdb_def_rule,
14994                                         .std_tbl_fix = 0,
14995                                         .skip_scale = dev_flow.skip_scale &
14996                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
14997                                 };
14998                                 struct mlx5_flow_meter_sub_policy *sub_policy =
14999                                 mtr_policy->sub_policys[domain][0];
15000
15001                                 if (i >= MLX5_MTR_RTE_COLORS)
15002                                         return -rte_mtr_error_set(error,
15003                                           ENOTSUP,
15004                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15005                                           NULL,
15006                                           "cannot create policy "
15007                                           "jump action for this color");
15008                                 jump_group =
15009                                 ((const struct rte_flow_action_jump *)
15010                                                         act->conf)->group;
15011                                 if (mlx5_flow_group_to_table(dev, NULL,
15012                                                        jump_group,
15013                                                        &table,
15014                                                        &grp_info, &flow_err))
15015                                         return -rte_mtr_error_set(error,
15016                                         ENOTSUP,
15017                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15018                                         NULL, "cannot setup "
15019                                         "policy jump action");
15020                                 sub_policy->jump_tbl[i] =
15021                                 flow_dv_tbl_resource_get(dev,
15022                                         table, egress,
15023                                         transfer,
15024                                         !!dev_flow.external,
15025                                         NULL, jump_group, 0,
15026                                         0, &flow_err);
15027                                 if (!sub_policy->jump_tbl[i])
15028                                         return -rte_mtr_error_set(error,
15029                                         ENOTSUP,
15030                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15031                                         NULL,
15032                                         "cannot create jump action.");
15033                                 tbl_data = container_of
15034                                 (sub_policy->jump_tbl[i],
15035                                 struct mlx5_flow_tbl_data_entry, tbl);
15036                                 act_cnt->dr_jump_action[domain] =
15037                                         tbl_data->jump.action;
15038                                 act_cnt->fate_action =
15039                                         MLX5_FLOW_FATE_JUMP;
15040                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15041                                 break;
15042                         }
15043                         case RTE_FLOW_ACTION_TYPE_METER:
15044                         {
15045                                 const struct rte_flow_action_meter *mtr;
15046                                 struct mlx5_flow_meter_info *next_fm;
15047                                 struct mlx5_flow_meter_policy *next_policy;
15048                                 struct rte_flow_action tag_action;
15049                                 struct mlx5_rte_flow_action_set_tag set_tag;
15050                                 uint32_t next_mtr_idx = 0;
15051
15052                                 mtr = act->conf;
15053                                 next_fm = mlx5_flow_meter_find(priv,
15054                                                         mtr->mtr_id,
15055                                                         &next_mtr_idx);
15056                                 if (!next_fm)
15057                                         return -rte_mtr_error_set(error, EINVAL,
15058                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15059                                                 "Failed to find the next meter.");
15060                                 if (next_fm->def_policy)
15061                                         return -rte_mtr_error_set(error, EINVAL,
15062                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15063                                 "Hierarchy only supports termination meter.");
15064                                 next_policy = mlx5_flow_meter_policy_find(dev,
15065                                                 next_fm->policy_id, NULL);
15066                                 MLX5_ASSERT(next_policy);
15067                                 if (next_fm->drop_cnt) {
15068                                         set_tag.id =
15069                                                 (enum modify_reg)
15070                                                 mlx5_flow_get_reg_id(dev,
15071                                                 MLX5_MTR_ID,
15072                                                 0,
15073                                                 (struct rte_flow_error *)error);
15074                                         set_tag.offset = (priv->mtr_reg_share ?
15075                                                 MLX5_MTR_COLOR_BITS : 0);
15076                                         set_tag.length = (priv->mtr_reg_share ?
15077                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15078                                                MLX5_REG_BITS);
15079                                         set_tag.data = next_mtr_idx;
15080                                         tag_action.type =
15081                                                 (enum rte_flow_action_type)
15082                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15083                                         tag_action.conf = &set_tag;
15084                                         if (flow_dv_convert_action_set_reg
15085                                                 (mhdr_res, &tag_action,
15086                                                 (struct rte_flow_error *)error))
15087                                                 return -rte_errno;
15088                                         action_flags |=
15089                                                 MLX5_FLOW_ACTION_SET_TAG;
15090                                 }
15091                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15092                                 act_cnt->next_mtr_id = next_fm->meter_id;
15093                                 act_cnt->next_sub_policy = NULL;
15094                                 mtr_policy->is_hierarchy = 1;
15095                                 mtr_policy->dev = next_policy->dev;
15096                                 action_flags |=
15097                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15098                                 break;
15099                         }
15100                         default:
15101                                 return -rte_mtr_error_set(error, ENOTSUP,
15102                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15103                                           NULL, "action type not supported");
15104                         }
15105                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15106                                 /* Create modify action if needed. */
15107                                 dev_flow.dv.group = 1;
15108                                 if (flow_dv_modify_hdr_resource_register
15109                                         (dev, mhdr_res, &dev_flow, &flow_err))
15110                                         return -rte_mtr_error_set(error,
15111                                                 ENOTSUP,
15112                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15113                                                 NULL, "cannot register policy "
15114                                                 "set tag action");
15115                                 act_cnt->modify_hdr =
15116                                         dev_flow.handle->dvh.modify_hdr;
15117                         }
15118                 }
15119         }
15120         return 0;
15121 }
15122
15123 /**
15124  * Create policy action per domain, lock free
15125  * (mutex should be acquired by caller).
15126  * Dispatcher for action type specific call.
15127  *
15128  * @param[in] dev
15129  *   Pointer to the Ethernet device structure.
15130  * @param[in] mtr_policy
15131  *   Meter policy struct.
15132  * @param[in] action
15133  *   Action specification used to create meter actions.
15134  * @param[out] error
15135  *   Perform verbose error reporting if not NULL. Initialized in case of
15136  *   error only.
15137  *
15138  * @return
15139  *   0 on success, otherwise negative errno value.
15140  */
15141 static int
15142 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15143                       struct mlx5_flow_meter_policy *mtr_policy,
15144                       const struct rte_flow_action *actions[RTE_COLORS],
15145                       struct rte_mtr_error *error)
15146 {
15147         int ret, i;
15148         uint16_t sub_policy_num;
15149
15150         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15151                 sub_policy_num = (mtr_policy->sub_policy_num >>
15152                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15153                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15154                 if (sub_policy_num) {
15155                         ret = __flow_dv_create_domain_policy_acts(dev,
15156                                 mtr_policy, actions,
15157                                 (enum mlx5_meter_domain)i, error);
15158                         if (ret)
15159                                 return ret;
15160                 }
15161         }
15162         return 0;
15163 }
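
/*
 * Illustrative sketch (not part of the driver): mtr_policy->sub_policy_num
 * packs one per-domain sub-policy count into a single 32-bit word, using
 * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per domain with each field covered
 * by MLX5_MTR_SUB_POLICY_NUM_MASK. The helpers below mirror the
 * extraction done in the loop above and the update done later in
 * __flow_dv_meter_get_rss_sub_policy(); the helper names are hypothetical.
 */
static inline uint16_t
mtr_sub_policy_num_get(uint32_t packed, int domain)
{
        return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
               MLX5_MTR_SUB_POLICY_NUM_MASK;
}

static inline uint32_t
mtr_sub_policy_num_set(uint32_t packed, int domain, uint16_t num)
{
        packed &= ~((uint32_t)MLX5_MTR_SUB_POLICY_NUM_MASK <<
                    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
        return packed | (((uint32_t)num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
}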
15164
15165 /**
15166  * Query a DV flow rule for its statistics via DevX.
15167  *
15168  * @param[in] dev
15169  *   Pointer to Ethernet device.
15170  * @param[in] cnt_idx
15171  *   Index to the flow counter.
15172  * @param[out] data
15173  *   Data retrieved by the query.
15174  * @param[out] error
15175  *   Perform verbose error reporting if not NULL.
15176  *
15177  * @return
15178  *   0 on success, a negative errno value otherwise and rte_errno is set.
15179  */
15180 static int
15181 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15182                     struct rte_flow_error *error)
15183 {
15184         struct mlx5_priv *priv = dev->data->dev_private;
15185         struct rte_flow_query_count *qc = data;
15186
15187         if (!priv->config.devx)
15188                 return rte_flow_error_set(error, ENOTSUP,
15189                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15190                                           NULL,
15191                                           "counters are not supported");
15192         if (cnt_idx) {
15193                 uint64_t pkts, bytes;
15194                 struct mlx5_flow_counter *cnt;
15195                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15196
15197                 if (err)
15198                         return rte_flow_error_set(error, -err,
15199                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15200                                         NULL, "cannot read counters");
15201                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15202                 qc->hits_set = 1;
15203                 qc->bytes_set = 1;
15204                 qc->hits = pkts - cnt->hits;
15205                 qc->bytes = bytes - cnt->bytes;
15206                 if (qc->reset) {
15207                         cnt->hits = pkts;
15208                         cnt->bytes = bytes;
15209                 }
15210                 return 0;
15211         }
15212         return rte_flow_error_set(error, EINVAL,
15213                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15214                                   NULL,
15215                                   "counters are not available");
15216 }
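
/*
 * Usage sketch (application side, not driver code): the counter query
 * above is reached through the generic rte_flow API when the flow was
 * created with a COUNT action. Assumes <stdio.h> and <inttypes.h>; the
 * helper name is hypothetical.
 */
static inline int
app_read_flow_counter(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_query_count qc = { .reset = 1 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_error err;

        /* Eventually dispatches to flow_dv_query()/flow_dv_query_count(). */
        if (rte_flow_query(port_id, flow, &action, &qc, &err))
                return -1;
        if (qc.hits_set && qc.bytes_set)
                printf("hits: %" PRIu64 " bytes: %" PRIu64 "\n",
                       qc.hits, qc.bytes);
        return 0;
}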
15217
15218 static int
15219 flow_dv_action_query(struct rte_eth_dev *dev,
15220                      const struct rte_flow_action_handle *handle, void *data,
15221                      struct rte_flow_error *error)
15222 {
15223         struct mlx5_age_param *age_param;
15224         struct rte_flow_query_age *resp;
15225         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15226         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15227         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15228         struct mlx5_priv *priv = dev->data->dev_private;
15229         struct mlx5_aso_ct_action *ct;
15230         uint16_t owner;
15231         uint32_t dev_idx;
15232
15233         switch (type) {
15234         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15235                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15236                 resp = data;
15237                 resp->aged = __atomic_load_n(&age_param->state,
15238                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15239                                                                           1 : 0;
15240                 resp->sec_since_last_hit_valid = !resp->aged;
15241                 if (resp->sec_since_last_hit_valid)
15242                         resp->sec_since_last_hit = __atomic_load_n
15243                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15244                 return 0;
15245         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15246                 return flow_dv_query_count(dev, idx, data, error);
15247         case MLX5_INDIRECT_ACTION_TYPE_CT:
15248                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15249                 if (owner != PORT_ID(priv))
15250                         return rte_flow_error_set(error, EACCES,
15251                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15252                                         NULL,
15253                                         "CT object owned by another port");
15254                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15255                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15256                 MLX5_ASSERT(ct);
15257                 if (!ct->refcnt)
15258                         return rte_flow_error_set(error, EFAULT,
15259                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15260                                         NULL,
15261                                         "CT object is inactive");
15262                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15263                                                         ct->peer;
15264                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15265                                                         ct->is_original;
15266                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15267                         return rte_flow_error_set(error, EIO,
15268                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15269                                         NULL,
15270                                         "Failed to query CT context");
15271                 return 0;
15272         default:
15273                 return rte_flow_error_set(error, ENOTSUP,
15274                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15275                                           "action type query not supported");
15276         }
15277 }
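
/*
 * Illustrative sketch: an indirect action handle in this driver is a
 * plain 32-bit value carrying the action type above
 * MLX5_INDIRECT_ACTION_TYPE_OFFSET and the object index below it, i.e.
 * the inverse of the decode at the top of flow_dv_action_query(). The
 * helper name is hypothetical.
 */
static inline uint32_t
indirect_act_handle_encode(uint32_t type, uint32_t idx)
{
        return (type << MLX5_INDIRECT_ACTION_TYPE_OFFSET) |
               (idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1));
}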
15278
15279 /**
15280  * Query a flow rule AGE action for aging information.
15281  *
15282  * @param[in] dev
15283  *   Pointer to Ethernet device.
15284  * @param[in] flow
15285  *   Pointer to the sub flow.
15286  * @param[out] data
15287  *   Data retrieved by the query.
15288  * @param[out] error
15289  *   Perform verbose error reporting if not NULL.
15290  *
15291  * @return
15292  *   0 on success, a negative errno value otherwise and rte_errno is set.
15293  */
15294 static int
15295 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15296                   void *data, struct rte_flow_error *error)
15297 {
15298         struct rte_flow_query_age *resp = data;
15299         struct mlx5_age_param *age_param;
15300
15301         if (flow->age) {
15302                 struct mlx5_aso_age_action *act =
15303                                      flow_aso_age_get_by_idx(dev, flow->age);
15304
15305                 age_param = &act->age_params;
15306         } else if (flow->counter) {
15307                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15308
15309                 if (!age_param || !age_param->timeout)
15310                         return rte_flow_error_set
15311                                         (error, EINVAL,
15312                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15313                                          NULL, "cannot read age data");
15314         } else {
15315                 return rte_flow_error_set(error, EINVAL,
15316                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15317                                           NULL, "age data not available");
15318         }
15319         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15320                                      AGE_TMOUT ? 1 : 0;
15321         resp->sec_since_last_hit_valid = !resp->aged;
15322         if (resp->sec_since_last_hit_valid)
15323                 resp->sec_since_last_hit = __atomic_load_n
15324                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15325         return 0;
15326 }
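
/*
 * Usage sketch (application side, not driver code): aging state is read
 * through the same rte_flow_query() entry point, this time with an AGE
 * action. The helper name is hypothetical.
 */
static inline int
app_flow_is_aged(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_query_age age;
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_AGE,
        };
        struct rte_flow_error err;

        memset(&age, 0, sizeof(age));
        if (rte_flow_query(port_id, flow, &action, &age, &err))
                return -1;
        /* age.aged == 1 means the timeout expired with no traffic seen. */
        return age.aged;
}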
15327
15328 /**
15329  * Query a flow.
15330  *
15331  * @see rte_flow_query()
15332  * @see rte_flow_ops
15333  */
15334 static int
15335 flow_dv_query(struct rte_eth_dev *dev,
15336               struct rte_flow *flow,
15337               const struct rte_flow_action *actions,
15338               void *data,
15339               struct rte_flow_error *error)
15340 {
15341         int ret = -EINVAL;
15342
15343         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15344                 switch (actions->type) {
15345                 case RTE_FLOW_ACTION_TYPE_VOID:
15346                         break;
15347                 case RTE_FLOW_ACTION_TYPE_COUNT:
15348                         ret = flow_dv_query_count(dev, flow->counter, data,
15349                                                   error);
15350                         break;
15351                 case RTE_FLOW_ACTION_TYPE_AGE:
15352                         ret = flow_dv_query_age(dev, flow, data, error);
15353                         break;
15354                 default:
15355                         return rte_flow_error_set(error, ENOTSUP,
15356                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15357                                                   actions,
15358                                                   "action not supported");
15359                 }
15360         }
15361         return ret;
15362 }
15363
15364 /**
15365  * Destroy the meter table set.
15366  * Lock free (mutex should be acquired by caller).
15367  *
15368  * @param[in] dev
15369  *   Pointer to Ethernet device.
15370  * @param[in] fm
15371  *   Meter information table.
15372  */
15373 static void
15374 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15375                         struct mlx5_flow_meter_info *fm)
15376 {
15377         struct mlx5_priv *priv = dev->data->dev_private;
15378         int i;
15379
15380         if (!fm || !priv->config.dv_flow_en)
15381                 return;
15382         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15383                 if (fm->drop_rule[i]) {
15384                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15385                         fm->drop_rule[i] = NULL;
15386                 }
15387         }
15388 }
15389
15390 static void
15391 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15392 {
15393         struct mlx5_priv *priv = dev->data->dev_private;
15394         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15395         struct mlx5_flow_tbl_data_entry *tbl;
15396         int i, j;
15397
15398         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15399                 if (mtrmng->def_rule[i]) {
15400                         claim_zero(mlx5_flow_os_destroy_flow
15401                                         (mtrmng->def_rule[i]));
15402                         mtrmng->def_rule[i] = NULL;
15403                 }
15404                 if (mtrmng->def_matcher[i]) {
15405                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
15406                                 struct mlx5_flow_tbl_data_entry, tbl);
15407                         mlx5_cache_unregister(&tbl->matchers,
15408                                       &mtrmng->def_matcher[i]->entry);
15409                         mtrmng->def_matcher[i] = NULL;
15410                 }
15411                 for (j = 0; j < MLX5_REG_BITS; j++) {
15412                         if (mtrmng->drop_matcher[i][j]) {
15413                                 tbl =
15414                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
15415                                              struct mlx5_flow_tbl_data_entry,
15416                                              tbl);
15417                                 mlx5_cache_unregister(&tbl->matchers,
15418                                         &mtrmng->drop_matcher[i][j]->entry);
15419                                 mtrmng->drop_matcher[i][j] = NULL;
15420                         }
15421                 }
15422                 if (mtrmng->drop_tbl[i]) {
15423                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15424                                 mtrmng->drop_tbl[i]);
15425                         mtrmng->drop_tbl[i] = NULL;
15426                 }
15427         }
15428 }
15429
15430 /* Number of meter flow actions, count and jump or count and drop. */
15431 #define METER_ACTIONS 2
15432
15433 static void
15434 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15435                               enum mlx5_meter_domain domain)
15436 {
15437         struct mlx5_priv *priv = dev->data->dev_private;
15438         struct mlx5_flow_meter_def_policy *def_policy =
15439                         priv->sh->mtrmng->def_policy[domain];
15440
15441         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15442         mlx5_free(def_policy);
15443         priv->sh->mtrmng->def_policy[domain] = NULL;
15444 }
15445
15446 /**
15447  * Destroy the default policy table set.
15448  *
15449  * @param[in] dev
15450  *   Pointer to Ethernet device.
15451  */
15452 static void
15453 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15454 {
15455         struct mlx5_priv *priv = dev->data->dev_private;
15456         int i;
15457
15458         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15459                 if (priv->sh->mtrmng->def_policy[i])
15460                         __flow_dv_destroy_domain_def_policy(dev,
15461                                         (enum mlx5_meter_domain)i);
15462         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15463 }
15464
15465 static int
15466 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
15467                         uint32_t color_reg_c_idx,
15468                         enum rte_color color, void *matcher_object,
15469                         int actions_n, void *actions,
15470                         bool match_src_port, const struct rte_flow_item *item,
15471                         void **rule, const struct rte_flow_attr *attr)
15472 {
15473         int ret;
15474         struct mlx5_flow_dv_match_params value = {
15475                 .size = sizeof(value.buf) -
15476                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15477         };
15478         struct mlx5_flow_dv_match_params matcher = {
15479                 .size = sizeof(matcher.buf) -
15480                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15481         };
15482         struct mlx5_priv *priv = dev->data->dev_private;
15483
15484         if (match_src_port && (priv->representor || priv->master)) {
15485                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
15486                                                    value.buf, item, attr)) {
15487                         DRV_LOG(ERR,
15488                         "Failed to create meter policy flow with port.");
15489                         return -1;
15490                 }
15491         }
15492         flow_dv_match_meta_reg(matcher.buf, value.buf,
15493                                 (enum modify_reg)color_reg_c_idx,
15494                                 rte_col_2_mlx5_col(color),
15495                                 UINT32_MAX);
15496         ret = mlx5_flow_os_create_flow(matcher_object,
15497                         (void *)&value, actions_n, actions, rule);
15498         if (ret) {
15499                 DRV_LOG(ERR, "Failed to create meter policy flow.");
15500                 return -1;
15501         }
15502         return 0;
15503 }
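
/*
 * Illustrative sketch: each policy rule above matches a single meter
 * color value in the color REG_C register with a full-register mask, as
 * passed to flow_dv_match_meta_reg(). A standalone equivalent of that
 * value/mask pair; struct and helper names are hypothetical.
 */
struct color_match_spec {
        uint32_t value; /* Device encoding of the color. */
        uint32_t mask;  /* Match the whole register. */
};

static inline struct color_match_spec
policy_color_match_spec(enum rte_color color)
{
        struct color_match_spec spec = {
                .value = rte_col_2_mlx5_col(color),
                .mask = UINT32_MAX,
        };

        return spec;
}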
15504
15505 static int
15506 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
15507                         uint32_t color_reg_c_idx,
15508                         uint16_t priority,
15509                         struct mlx5_flow_meter_sub_policy *sub_policy,
15510                         const struct rte_flow_attr *attr,
15511                         bool match_src_port,
15512                         const struct rte_flow_item *item,
15513                         struct mlx5_flow_dv_matcher **policy_matcher,
15514                         struct rte_flow_error *error)
15515 {
15516         struct mlx5_cache_entry *entry;
15517         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
15518         struct mlx5_flow_dv_matcher matcher = {
15519                 .mask = {
15520                         .size = sizeof(matcher.mask.buf) -
15521                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15522                 },
15523                 .tbl = tbl_rsc,
15524         };
15525         struct mlx5_flow_dv_match_params value = {
15526                 .size = sizeof(value.buf) -
15527                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15528         };
15529         struct mlx5_flow_cb_ctx ctx = {
15530                 .error = error,
15531                 .data = &matcher,
15532         };
15533         struct mlx5_flow_tbl_data_entry *tbl_data;
15534         struct mlx5_priv *priv = dev->data->dev_private;
15535         uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
15536
15537         if (match_src_port && (priv->representor || priv->master)) {
15538                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
15539                                                    value.buf, item, attr)) {
15540                         DRV_LOG(ERR,
15541                         "Failed to register meter policy matcher with port.");
15542                         return -1;
15543                 }
15544         }
15545         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
15546         if (priority < RTE_COLOR_RED)
15547                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15548                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
15549         matcher.priority = priority;
15550         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
15551                                         matcher.mask.size);
15552         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15553         if (!entry) {
15554                 DRV_LOG(ERR, "Failed to register meter policy matcher.");
15555                 return -1;
15556         }
15557         *policy_matcher =
15558                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
15559         return 0;
15560 }
15561
15562 /**
15563  * Create the policy rules per domain.
15564  *
15565  * @param[in] dev
15566  *   Pointer to Ethernet device.
15567  * @param[in] sub_policy
15568  *   Pointer to sub policy table.
15569  * @param[in] egress
15570  *   Direction of the table.
15571  * @param[in] transfer
15572  *   E-Switch or NIC flow.
15573  * @param[in] acts
15574  *   Pointer to policy action list per color.
15575  *
15576  * @return
15577  *   0 on success, -1 otherwise.
15578  */
15579 static int
15580 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
15581                 struct mlx5_flow_meter_sub_policy *sub_policy,
15582                 uint8_t egress, uint8_t transfer, bool match_src_port,
15583                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
15584 {
15585         struct mlx5_priv *priv = dev->data->dev_private;
15586         struct rte_flow_error flow_err;
15587         uint32_t color_reg_c_idx;
15588         struct rte_flow_attr attr = {
15589                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
15590                 .priority = 0,
15591                 .ingress = 0,
15592                 .egress = !!egress,
15593                 .transfer = !!transfer,
15594                 .reserved = 0,
15595         };
15596         int i;
15597         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
15598         struct mlx5_sub_policy_color_rule *color_rule;
15599
15600         if (ret < 0)
15601                 return -1;
15602         /* Create policy table with POLICY level. */
15603         if (!sub_policy->tbl_rsc)
15604                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
15605                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
15606                                 egress, transfer, false, NULL, 0, 0,
15607                                 sub_policy->idx, &flow_err);
15608         if (!sub_policy->tbl_rsc) {
15609                 DRV_LOG(ERR,
15610                         "Failed to create meter sub policy table.");
15611                 return -1;
15612         }
15613         /* Prepare matchers. */
15614         color_reg_c_idx = ret;
15615         for (i = 0; i < RTE_COLORS; i++) {
15616                 TAILQ_INIT(&sub_policy->color_rules[i]);
15617                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
15618                         continue;
15619                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
15620                                 sizeof(struct mlx5_sub_policy_color_rule),
15621                                 0, SOCKET_ID_ANY);
15622                 if (!color_rule) {
15623                         DRV_LOG(ERR, "No memory to create color rule.");
15624                         goto err_exit;
15625                 }
15626                 color_rule->src_port = priv->representor_id;
15627                 attr.priority = i;
15628                 /* Create matchers for Color. */
15629                 if (__flow_dv_create_policy_matcher(dev,
15630                                 color_reg_c_idx, i, sub_policy, &attr,
15631                                 (i != RTE_COLOR_RED ? match_src_port : false),
15632                                 NULL, &color_rule->matcher, &flow_err)) {
15633                         DRV_LOG(ERR, "Failed to create color matcher.");
15634                         goto err_exit;
15635                 }
15636                 /* Create flow, matching color. */
15637                 if (__flow_dv_create_policy_flow(dev,
15638                                 color_reg_c_idx, (enum rte_color)i,
15639                                 color_rule->matcher->matcher_object,
15640                                 acts[i].actions_n,
15641                                 acts[i].dv_actions,
15642                                 (i != RTE_COLOR_RED ? match_src_port : false),
15643                                 NULL, &color_rule->rule,
15644                                 &attr)) {
15645                         DRV_LOG(ERR, "Failed to create color rule.");
15646                         goto err_exit;
15647                 }
15648                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
15649                                   color_rule, next_port);
15650         }
15651         return 0;
15652 err_exit:
15653         if (color_rule) {
15654                 if (color_rule->rule)
15655                         mlx5_flow_os_destroy_flow(color_rule->rule);
15656                 if (color_rule->matcher) {
15657                         struct mlx5_flow_tbl_data_entry *tbl =
15658                                 container_of(color_rule->matcher->tbl,
15659                                                 typeof(*tbl), tbl);
15660                         mlx5_cache_unregister(&tbl->matchers,
15661                                                 &color_rule->matcher->entry);
15662                 }
15663                 mlx5_free(color_rule);
15664         }
15665         return -1;
15666 }
15667
15668 static int
15669 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
15670                         struct mlx5_flow_meter_policy *mtr_policy,
15671                         struct mlx5_flow_meter_sub_policy *sub_policy,
15672                         uint32_t domain)
15673 {
15674         struct mlx5_priv *priv = dev->data->dev_private;
15675         struct mlx5_meter_policy_acts acts[RTE_COLORS];
15676         struct mlx5_flow_dv_tag_resource *tag;
15677         struct mlx5_flow_dv_port_id_action_resource *port_action;
15678         struct mlx5_hrxq *hrxq;
15679         struct mlx5_flow_meter_info *next_fm = NULL;
15680         struct mlx5_flow_meter_policy *next_policy;
15681         struct mlx5_flow_meter_sub_policy *next_sub_policy;
15682         struct mlx5_flow_tbl_data_entry *tbl_data;
15683         struct rte_flow_error error;
15684         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15685         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15686         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
15687         bool match_src_port = false;
15688         int i;
15689
15690         for (i = 0; i < RTE_COLORS; i++) {
15691                 acts[i].actions_n = 0;
15692                 if (i == RTE_COLOR_YELLOW)
15693                         continue;
15694                 if (i == RTE_COLOR_RED) {
15695                         /* Only support drop on red. */
15696                         acts[i].dv_actions[0] =
15697                         mtr_policy->dr_drop_action[domain];
15698                         acts[i].actions_n = 1;
15699                         continue;
15700                 }
15701                 if (mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
15702                         struct rte_flow_attr attr = {
15703                                 .transfer = transfer
15704                         };
15705
15706                         next_fm = mlx5_flow_meter_find(priv,
15707                                         mtr_policy->act_cnt[i].next_mtr_id,
15708                                         NULL);
15709                         if (!next_fm) {
15710                                 DRV_LOG(ERR,
15711                                         "Failed to get next hierarchy meter.");
15712                                 goto err_exit;
15713                         }
15714                         if (mlx5_flow_meter_attach(priv, next_fm,
15715                                                    &attr, &error)) {
15716                                 DRV_LOG(ERR, "%s", error.message);
15717                                 next_fm = NULL;
15718                                 goto err_exit;
15719                         }
15720                         /* Meter action must be the first for TX. */
15721                         if (mtr_first) {
15722                                 acts[i].dv_actions[acts[i].actions_n] =
15723                                         next_fm->meter_action;
15724                                 acts[i].actions_n++;
15725                         }
15726                 }
15727                 if (mtr_policy->act_cnt[i].rix_mark) {
15728                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
15729                                         mtr_policy->act_cnt[i].rix_mark);
15730                         if (!tag) {
15731                                 DRV_LOG(ERR, "Failed to find "
15732                                 "mark action for policy.");
15733                                 goto err_exit;
15734                         }
15735                         acts[i].dv_actions[acts[i].actions_n] =
15736                                                 tag->action;
15737                         acts[i].actions_n++;
15738                 }
15739                 if (mtr_policy->act_cnt[i].modify_hdr) {
15740                         acts[i].dv_actions[acts[i].actions_n] =
15741                         mtr_policy->act_cnt[i].modify_hdr->action;
15742                         acts[i].actions_n++;
15743                 }
15744                 if (mtr_policy->act_cnt[i].fate_action) {
15745                         switch (mtr_policy->act_cnt[i].fate_action) {
15746                         case MLX5_FLOW_FATE_PORT_ID:
15747                                 port_action = mlx5_ipool_get
15748                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
15749                                 mtr_policy->act_cnt[i].rix_port_id_action);
15750                                 if (!port_action) {
15751                                         DRV_LOG(ERR, "Failed to find "
15752                                                 "port action for policy.");
15753                                         goto err_exit;
15754                                 }
15755                                 acts[i].dv_actions[acts[i].actions_n] =
15756                                 port_action->action;
15757                                 acts[i].actions_n++;
15758                                 mtr_policy->dev = dev;
15759                                 match_src_port = true;
15760                                 break;
15761                         case MLX5_FLOW_FATE_DROP:
15762                         case MLX5_FLOW_FATE_JUMP:
15763                                 acts[i].dv_actions[acts[i].actions_n] =
15764                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
15765                                 acts[i].actions_n++;
15766                                 break;
15767                         case MLX5_FLOW_FATE_SHARED_RSS:
15768                         case MLX5_FLOW_FATE_QUEUE:
15769                                 hrxq = mlx5_ipool_get
15770                                 (priv->sh->ipool[MLX5_IPOOL_HRXQ],
15771                                 sub_policy->rix_hrxq[i]);
15772                                 if (!hrxq) {
15773                                         DRV_LOG(ERR, "Failed to find "
15774                                                 "queue action for policy.");
15775                                         goto err_exit;
15776                                 }
15777                                 acts[i].dv_actions[acts[i].actions_n] =
15778                                 hrxq->action;
15779                                 acts[i].actions_n++;
15780                                 break;
15781                         case MLX5_FLOW_FATE_MTR:
15782                                 if (!next_fm) {
15783                                         DRV_LOG(ERR,
15784                                                 "No next hierarchy meter.");
15785                                         goto err_exit;
15786                                 }
15787                                 if (!mtr_first) {
15788                                         acts[i].dv_actions[acts[i].actions_n] =
15789                                                         next_fm->meter_action;
15790                                         acts[i].actions_n++;
15791                                 }
15792                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
15793                                         next_sub_policy =
15794                                         mtr_policy->act_cnt[i].next_sub_policy;
15795                                 } else {
15796                                         next_policy =
15797                                                 mlx5_flow_meter_policy_find(dev,
15798                                                 next_fm->policy_id, NULL);
15799                                         MLX5_ASSERT(next_policy);
15800                                         next_sub_policy =
15801                                         next_policy->sub_policys[domain][0];
15802                                 }
15803                                 tbl_data =
15804                                         container_of(next_sub_policy->tbl_rsc,
15805                                         struct mlx5_flow_tbl_data_entry, tbl);
15806                                 acts[i].dv_actions[acts[i].actions_n++] =
15807                                                         tbl_data->jump.action;
15808                                 if (mtr_policy->act_cnt[i].modify_hdr)
15809                                         match_src_port = !!transfer;
15810                                 break;
15811                         default:
15812                                 /* Queue action does nothing. */
15813                                 break;
15814                         }
15815                 }
15816         }
15817         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
15818                                 egress, transfer, match_src_port, acts)) {
15819                 DRV_LOG(ERR,
15820                 "Failed to create policy rules per domain.");
15821                 goto err_exit;
15822         }
15823         return 0;
15824 err_exit:
15825         if (next_fm)
15826                 mlx5_flow_meter_detach(priv, next_fm);
15827         return -1;
15828 }
15829
15830 /**
15831  * Create the policy rules.
15832  *
15833  * @param[in] dev
15834  *   Pointer to Ethernet device.
15835  * @param[in,out] mtr_policy
15836  *   Pointer to meter policy table.
15837  *
15838  * @return
15839  *   0 on success, -1 otherwise.
15840  */
15841 static int
15842 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
15843                              struct mlx5_flow_meter_policy *mtr_policy)
15844 {
15845         int i;
15846         uint16_t sub_policy_num;
15847
15848         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15849                 sub_policy_num = (mtr_policy->sub_policy_num >>
15850                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15851                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15852                 if (!sub_policy_num)
15853                         continue;
15854                 /* Prepare actions list and create policy rules. */
15855                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
15856                         mtr_policy->sub_policys[i][0], i)) {
15857                         DRV_LOG(ERR,
15858                         "Failed to create policy action list per domain.");
15859                         return -1;
15860                 }
15861         }
15862         return 0;
15863 }
15864
15865 static int
15866 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
15867 {
15868         struct mlx5_priv *priv = dev->data->dev_private;
15869         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15870         struct mlx5_flow_meter_def_policy *def_policy;
15871         struct mlx5_flow_tbl_resource *jump_tbl;
15872         struct mlx5_flow_tbl_data_entry *tbl_data;
15873         uint8_t egress, transfer;
15874         struct rte_flow_error error;
15875         struct mlx5_meter_policy_acts acts[RTE_COLORS];
15876         int ret;
15877
15878         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15879         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15880         def_policy = mtrmng->def_policy[domain];
15881         if (!def_policy) {
15882                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
15883                         sizeof(struct mlx5_flow_meter_def_policy),
15884                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
15885                 if (!def_policy) {
15886                         DRV_LOG(ERR, "Failed to alloc "
15887                                         "default policy table.");
15888                         goto def_policy_error;
15889                 }
15890                 mtrmng->def_policy[domain] = def_policy;
15891                 /* Create the meter suffix table with SUFFIX level. */
15892                 jump_tbl = flow_dv_tbl_resource_get(dev,
15893                                 MLX5_FLOW_TABLE_LEVEL_METER,
15894                                 egress, transfer, false, NULL, 0,
15895                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
15896                 if (!jump_tbl) {
15897                         DRV_LOG(ERR,
15898                                 "Failed to create meter suffix table.");
15899                         goto def_policy_error;
15900                 }
15901                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
15902                 tbl_data = container_of(jump_tbl,
15903                                 struct mlx5_flow_tbl_data_entry, tbl);
15904                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
15905                                                 tbl_data->jump.action;
15906                 acts[RTE_COLOR_GREEN].dv_actions[0] =
15907                                                 tbl_data->jump.action;
15908                 acts[RTE_COLOR_GREEN].actions_n = 1;
15909                 /* Create jump action to the drop table. */
15910                 if (!mtrmng->drop_tbl[domain]) {
15911                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
15912                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
15913                                 egress, transfer, false, NULL, 0,
15914                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
15915                         if (!mtrmng->drop_tbl[domain]) {
15916                                 DRV_LOG(ERR, "Failed to create "
15917                                 "meter drop table for default policy.");
15918                                 goto def_policy_error;
15919                         }
15920                 }
15921                 tbl_data = container_of(mtrmng->drop_tbl[domain],
15922                                 struct mlx5_flow_tbl_data_entry, tbl);
15923                 def_policy->dr_jump_action[RTE_COLOR_RED] =
15924                                                 tbl_data->jump.action;
15925                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
15926                 acts[RTE_COLOR_RED].actions_n = 1;
15927                 /* Create default policy rules. */
15928                 ret = __flow_dv_create_domain_policy_rules(dev,
15929                                         &def_policy->sub_policy,
15930                                         egress, transfer, false, acts);
15931                 if (ret) {
15932                         DRV_LOG(ERR, "Failed to create "
15933                                 "default policy rules.");
15934                         goto def_policy_error;
15935                 }
15936         }
15937         return 0;
15938 def_policy_error:
15939         __flow_dv_destroy_domain_def_policy(dev,
15940                         (enum mlx5_meter_domain)domain);
15941         return -1;
15942 }
15943
15944 /**
15945  * Create the default policy table set.
15946  *
15947  * @param[in] dev
15948  *   Pointer to Ethernet device.
15949  * @return
15950  *   0 on success, -1 otherwise.
15951  */
15952 static int
15953 flow_dv_create_def_policy(struct rte_eth_dev *dev)
15954 {
15955         struct mlx5_priv *priv = dev->data->dev_private;
15956         int i;
15957
15958         /* Non-termination policy table. */
15959         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15960                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
15961                         continue;
15962                 if (__flow_dv_create_domain_def_policy(dev, i)) {
15963                         DRV_LOG(ERR,
15964                         "Failed to create default policy");
15965                         return -1;
15966                 }
15967         }
15968         return 0;
15969 }
15970
15971 /**
15972  * Create the needed meter tables.
15973  * Lock free (mutex should be acquired by caller).
15974  *
15975  * @param[in] dev
15976  *   Pointer to Ethernet device.
15977  * @param[in] fm
15978  *   Meter information table.
15979  * @param[in] mtr_idx
15980  *   Meter index.
15981  * @param[in] domain_bitmap
15982  *   Domain bitmap.
15983  * @return
15984  *   0 on success, -1 otherwise.
15985  */
15986 static int
15987 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
15988                         struct mlx5_flow_meter_info *fm,
15989                         uint32_t mtr_idx,
15990                         uint8_t domain_bitmap)
15991 {
15992         struct mlx5_priv *priv = dev->data->dev_private;
15993         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15994         struct rte_flow_error error;
15995         struct mlx5_flow_tbl_data_entry *tbl_data;
15996         uint8_t egress, transfer;
15997         void *actions[METER_ACTIONS];
15998         int domain, ret, i;
15999         struct mlx5_flow_counter *cnt;
16000         struct mlx5_flow_dv_match_params value = {
16001                 .size = sizeof(value.buf) -
16002                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
16003         };
16004         struct mlx5_flow_dv_match_params matcher_para = {
16005                 .size = sizeof(matcher_para.buf) -
16006                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
16007         };
16008         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16009                                                      0, &error);
16010         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16011         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16012         struct mlx5_cache_entry *entry;
16013         struct mlx5_flow_dv_matcher matcher = {
16014                 .mask = {
16015                         .size = sizeof(matcher.mask.buf) -
16016                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
16017                 },
16018         };
16019         struct mlx5_flow_dv_matcher *drop_matcher;
16020         struct mlx5_flow_cb_ctx ctx = {
16021                 .error = &error,
16022                 .data = &matcher,
16023         };
16024
16025         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16026                 rte_errno = ENOTSUP;
16027                 return -1;
16028         }
16029         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16030                 if (!(domain_bitmap & (1 << domain)) ||
16031                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16032                         continue;
16033                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16034                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16035                 /* Create the drop table with METER DROP level. */
16036                 if (!mtrmng->drop_tbl[domain]) {
16037                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16038                                         MLX5_FLOW_TABLE_LEVEL_METER,
16039                                         egress, transfer, false, NULL, 0,
16040                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16041                         if (!mtrmng->drop_tbl[domain]) {
16042                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16043                                 goto policy_error;
16044                         }
16045                 }
16046                 /* Create default matcher in drop table. */
16047                 matcher.tbl = mtrmng->drop_tbl[domain];
16048                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16049                                 struct mlx5_flow_tbl_data_entry, tbl);
16050                 if (!mtrmng->def_matcher[domain]) {
16051                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16052                                        (enum modify_reg)mtr_id_reg_c,
16053                                        0, 0);
16054                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16055                         matcher.crc = rte_raw_cksum
16056                                         ((const void *)matcher.mask.buf,
16057                                         matcher.mask.size);
16058                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
16059                         if (!entry) {
16060                                 DRV_LOG(ERR, "Failed to register meter "
16061                                 "drop default matcher.");
16062                                 goto policy_error;
16063                         }
16064                         mtrmng->def_matcher[domain] = container_of(entry,
16065                         struct mlx5_flow_dv_matcher, entry);
16066                 }
16067                 /* Create default rule in drop table. */
16068                 if (!mtrmng->def_rule[domain]) {
16069                         i = 0;
16070                         actions[i++] = priv->sh->dr_drop_action;
16071                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16072                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16073                         ret = mlx5_flow_os_create_flow
16074                                 (mtrmng->def_matcher[domain]->matcher_object,
16075                                 (void *)&value, i, actions,
16076                                 &mtrmng->def_rule[domain]);
16077                         if (ret) {
16078                                 DRV_LOG(ERR, "Failed to create meter "
16079                                 "default drop rule for drop table.");
16080                                 goto policy_error;
16081                         }
16082                 }
16083                 if (!fm->drop_cnt)
16084                         continue;
16085                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16086                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16087                         /* Create matchers for Drop. */
16088                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16089                                         (enum modify_reg)mtr_id_reg_c, 0,
16090                                         (mtr_id_mask << mtr_id_offset));
16091                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16092                         matcher.crc = rte_raw_cksum
16093                                         ((const void *)matcher.mask.buf,
16094                                         matcher.mask.size);
16095                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
16096                         if (!entry) {
16097                                 DRV_LOG(ERR,
16098                                 "Failed to register meter drop matcher.");
16099                                 goto policy_error;
16100                         }
16101                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16102                                 container_of(entry, struct mlx5_flow_dv_matcher,
16103                                              entry);
16104                 }
16105                 drop_matcher =
16106                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16107                 /* Create drop rule, matching meter_id only. */
16108                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16109                                 (enum modify_reg)mtr_id_reg_c,
16110                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16111                 i = 0;
16112                 cnt = flow_dv_counter_get_by_idx(dev,
16113                                         fm->drop_cnt, NULL);
16114                 actions[i++] = cnt->action;
16115                 actions[i++] = priv->sh->dr_drop_action;
16116                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16117                                                (void *)&value, i, actions,
16118                                                &fm->drop_rule[domain]);
16119                 if (ret) {
16120                         DRV_LOG(ERR, "Failed to create meter "
16121                                 "drop rule for drop table.");
16122                         goto policy_error;
16123                 }
16124         }
16125         return 0;
16126 policy_error:
16127         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16128                 if (fm->drop_rule[i]) {
16129                         claim_zero(mlx5_flow_os_destroy_flow
16130                                 (fm->drop_rule[i]));
16131                         fm->drop_rule[i] = NULL;
16132                 }
16133         }
16134         return -1;
16135 }
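
/*
 * Illustrative sketch: when the meter register is shared
 * (priv->mtr_reg_share), one REG_C carries the packet color in its low
 * MLX5_MTR_COLOR_BITS bits and the meter ID above them; otherwise the
 * whole register holds the meter ID. The per-domain drop matcher above
 * masks exactly the ID field and each per-meter drop rule supplies the
 * shifted meter index as the value; the helper name is hypothetical.
 */
static inline void
mtr_id_match_calc(uint32_t mtr_idx, uint8_t max_mtr_bits, bool reg_share,
                  uint32_t *value, uint32_t *mask)
{
        uint8_t offset = reg_share ? MLX5_MTR_COLOR_BITS : 0;
        uint32_t id_mask = (UINT32_C(1) << max_mtr_bits) - 1;

        *value = mtr_idx << offset;
        *mask = id_mask << offset;
}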
16136
16137 static struct mlx5_flow_meter_sub_policy *
16138 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16139                 struct mlx5_flow_meter_policy *mtr_policy,
16140                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16141                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16142                 bool *is_reuse)
16143 {
16144         struct mlx5_priv *priv = dev->data->dev_private;
16145         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16146         uint32_t sub_policy_idx = 0;
16147         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16148         uint32_t i, j;
16149         struct mlx5_hrxq *hrxq;
16150         struct mlx5_flow_handle dh;
16151         struct mlx5_meter_policy_action_container *act_cnt;
16152         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16153         uint16_t sub_policy_num;
16154
16155         rte_spinlock_lock(&mtr_policy->sl);
16156         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16157                 if (!rss_desc[i])
16158                         continue;
16159                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16160                 if (!hrxq_idx[i]) {
16161                         rte_spinlock_unlock(&mtr_policy->sl);
16162                         return NULL;
16163                 }
16164         }
16165         sub_policy_num = (mtr_policy->sub_policy_num >>
16166                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16167                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16168         for (i = 0; i < sub_policy_num; i++) {
16170                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
16171                         if (rss_desc[j] &&
16172                                 hrxq_idx[j] !=
16173                         mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
16174                                 break;
16175                 }
16176                 if (j >= MLX5_MTR_RTE_COLORS) {
16177                         /*
16178                          * Found the sub policy table with
16179                          * the same queue per color
16180                          */
16181                         rte_spinlock_unlock(&mtr_policy->sl);
16182                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
16183                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
16184                         *is_reuse = true;
16185                         return mtr_policy->sub_policys[domain][i];
16186                 }
16187         }
16188         /* Create sub policy. */
16189         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16190                 /* Reuse the first dummy sub_policy. */
16191                 sub_policy = mtr_policy->sub_policys[domain][0];
16192                 sub_policy_idx = sub_policy->idx;
16193         } else {
16194                 sub_policy = mlx5_ipool_zmalloc
16195                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16196                                 &sub_policy_idx);
16197                 if (!sub_policy ||
16198                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16199                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16200                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16201                         goto rss_sub_policy_error;
16202                 }
16203                 sub_policy->idx = sub_policy_idx;
16204                 sub_policy->main_policy = mtr_policy;
16205         }
16206         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16207                 if (!rss_desc[i])
16208                         continue;
16209                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16210                 if (mtr_policy->is_hierarchy) {
16211                         act_cnt = &mtr_policy->act_cnt[i];
16212                         act_cnt->next_sub_policy = next_sub_policy;
16213                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16214                 } else {
16215                         /*
16216                          * Overwrite the last action from
16217                          * RSS action to Queue action.
16218                          */
16219                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16220                                 hrxq_idx[i]);
16221                         if (!hrxq) {
16222                                 DRV_LOG(ERR, "Failed to find policy hrxq.");
16223                                 goto rss_sub_policy_error;
16224                         }
16225                         act_cnt = &mtr_policy->act_cnt[i];
16226                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16227                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16228                                 if (act_cnt->rix_mark)
16229                                         dh.mark = 1;
16230                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16231                                 dh.rix_hrxq = hrxq_idx[i];
16232                                 flow_drv_rxq_flags_set(dev, &dh);
16233                         }
16234                 }
16235         }
16236         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16237                 sub_policy, domain)) {
16238                 DRV_LOG(ERR, "Failed to create policy "
16239                         "rules per domain.");
16240                 goto rss_sub_policy_error;
16241         }
16242         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16243                 i = (mtr_policy->sub_policy_num >>
16244                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16245                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16246                 mtr_policy->sub_policys[domain][i] = sub_policy;
16247                 i++;
16248                 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
16249                         goto rss_sub_policy_error;
16250                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16251                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16252                 mtr_policy->sub_policy_num |=
16253                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16254                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16255         }
16256         rte_spinlock_unlock(&mtr_policy->sl);
16257         *is_reuse = false;
16258         return sub_policy;
16259 rss_sub_policy_error:
16260         if (sub_policy) {
16261                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16262                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16263                         i = (mtr_policy->sub_policy_num >>
16264                               (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16265                               MLX5_MTR_SUB_POLICY_NUM_MASK;
16266                         if (i < MLX5_MTR_RSS_MAX_SUB_POLICY)
16267                                 mtr_policy->sub_policys[domain][i] = NULL;
16268                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16269                                         sub_policy->idx);
16270                 }
16271         }
16272         rte_spinlock_unlock(&mtr_policy->sl);
16273         return NULL;
16274 }
16275
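/*
 * A minimal sketch of the sub_policy_num encoding manipulated above and in
 * the destroy/error paths below: each domain owns one
 * MLX5_MTR_SUB_POLICY_NUM_SHIFT-bit slot in a single 32-bit word, masked
 * by MLX5_MTR_SUB_POLICY_NUM_MASK. The helpers and the
 * MLX5_FLOW_DOC_EXAMPLES guard are illustrative only and never defined,
 * so the block below is documentation, not driver code.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
/* Read the sub-policy count of one domain out of the packed word. */
static inline uint16_t
example_sub_policy_num_get(uint32_t packed, uint32_t domain)
{
	return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
	       MLX5_MTR_SUB_POLICY_NUM_MASK;
}

/* Write a count back: clear the domain's slot, then OR the new value in. */
static inline uint32_t
example_sub_policy_num_set(uint32_t packed, uint32_t domain, uint16_t num)
{
	packed &= ~((uint32_t)MLX5_MTR_SUB_POLICY_NUM_MASK <<
		    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
	packed |= (uint32_t)(num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
		  (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
	return packed;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */
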
16276 /**
16277  * Find or create the sub policy tables for the prefix table with RSS.
16278  *
16279  * @param[in] dev
16280  *   Pointer to Ethernet device.
16281  * @param[in] mtr_policy
16282  *   Pointer to meter policy table.
16283  * @param[in] rss_desc
16284  *   Pointer to rss_desc.
16285  * @return
16286  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
16287  */
16288 static struct mlx5_flow_meter_sub_policy *
16289 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
16290                 struct mlx5_flow_meter_policy *mtr_policy,
16291                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
16292 {
16293         struct mlx5_priv *priv = dev->data->dev_private;
16294         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16295         struct mlx5_flow_meter_info *next_fm;
16296         struct mlx5_flow_meter_policy *next_policy;
16297         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
16298         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
16299         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
16300         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16301         bool reuse_sub_policy;
16302         uint32_t i = 0;
16303         uint32_t j = 0;
16304
16305         while (true) {
16306                 /* Iterate hierarchy to get all policies in this hierarchy. */
16307                 policies[i++] = mtr_policy;
16308                 if (!mtr_policy->is_hierarchy)
16309                         break;
16310                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
16311                         DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
16312                         return NULL;
16313                 }
16314                 next_fm = mlx5_flow_meter_find(priv,
16315                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16316                 if (!next_fm) {
16317                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
16318                         return NULL;
16319                 }
16320                 next_policy =
16321                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
16322                                                     NULL);
16323                 MLX5_ASSERT(next_policy);
16324                 mtr_policy = next_policy;
16325         }
16326         while (i) {
16327                 /*
16328                  * From the last policy to the first one in the hierarchy,
16329                  * create/get the sub policy for each of them.
16330                  */
16331                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
16332                                                         policies[--i],
16333                                                         rss_desc,
16334                                                         next_sub_policy,
16335                                                         &reuse_sub_policy);
16336                 if (!sub_policy) {
16337                         DRV_LOG(ERR, "Failed to get the sub policy.");
16338                         goto err_exit;
16339                 }
16340                 if (!reuse_sub_policy)
16341                         sub_policies[j++] = sub_policy;
16342                 next_sub_policy = sub_policy;
16343         }
16344         return sub_policy;
16345 err_exit:
16346         while (j) {
16347                 uint16_t sub_policy_num;
16348
16349                 sub_policy = sub_policies[--j];
16350                 mtr_policy = sub_policy->main_policy;
16351                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16352                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16353                         sub_policy_num = (mtr_policy->sub_policy_num >>
16354                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16355                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
16356                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
16357                                                                         NULL;
16358                         sub_policy_num--;
16359                         mtr_policy->sub_policy_num &=
16360                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16361                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16362                         mtr_policy->sub_policy_num |=
16363                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16364                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16365                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16366                                         sub_policy->idx);
16367                 }
16368         }
16369         return NULL;
16370 }
16371
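/*
 * Worked trace of the two loops above for a three-level hierarchy
 * M0 -> M1 -> M2: the first loop collects policies[] = {P0, P1, P2}
 * top-down, the second loop then resolves them bottom-up (P2, P1, P0),
 * so each level can reference the table of the level below it via
 * next_sub_policy. On failure, only the entries recorded in
 * sub_policies[] - the sub-policies actually created by this call, not
 * the reused ones - are rolled back.
 */
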
16372 /**
16373  * Create the sub policy tag rule for all meters in hierarchy.
16374  *
16375  * @param[in] dev
16376  *   Pointer to Ethernet device.
16377  * @param[in] fm
16378  *   Meter information table.
16379  * @param[in] src_port
16380  *   The src port this extra rule should use.
16381  * @param[in] item
16382  *   The src port match item.
16383  * @param[out] error
16384  *   Perform verbose error reporting if not NULL.
16385  * @return
16386  *   0 on success, a negative errno value otherwise and rte_errno is set.
16387  */
16388 static int
16389 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
16390                                 struct mlx5_flow_meter_info *fm,
16391                                 int32_t src_port,
16392                                 const struct rte_flow_item *item,
16393                                 struct rte_flow_error *error)
16394 {
16395         struct mlx5_priv *priv = dev->data->dev_private;
16396         struct mlx5_flow_meter_policy *mtr_policy;
16397         struct mlx5_flow_meter_sub_policy *sub_policy;
16398         struct mlx5_flow_meter_info *next_fm = NULL;
16399         struct mlx5_flow_meter_policy *next_policy;
16400         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16401         struct mlx5_flow_tbl_data_entry *tbl_data;
16402         struct mlx5_sub_policy_color_rule *color_rule = NULL;
16403         struct mlx5_meter_policy_acts acts;
16404         uint32_t color_reg_c_idx;
16405         bool mtr_first = (src_port != UINT16_MAX);
16406         struct rte_flow_attr attr = {
16407                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16408                 .priority = 0,
16409                 .ingress = 0,
16410                 .egress = 0,
16411                 .transfer = 1,
16412                 .reserved = 0,
16413         };
16414         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
16415         int i;
16416
16417         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
16418         MLX5_ASSERT(mtr_policy);
16419         if (!mtr_policy->is_hierarchy)
16420                 return 0;
16421         next_fm = mlx5_flow_meter_find(priv,
16422                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16423         if (!next_fm) {
16424                 return rte_flow_error_set(error, EINVAL,
16425                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
16426                                 "Failed to find next meter in hierarchy.");
16427         }
16428         if (!next_fm->drop_cnt)
16429                 goto exit;
16430         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
16431         sub_policy = mtr_policy->sub_policys[domain][0];
16432         for (i = 0; i < RTE_COLORS; i++) {
16433                 bool rule_exist = false;
16434                 struct mlx5_meter_policy_action_container *act_cnt;
16435
16436                 if (i >= RTE_COLOR_YELLOW)
16437                         break;
16438                 TAILQ_FOREACH(color_rule,
16439                               &sub_policy->color_rules[i], next_port)
16440                         if (color_rule->src_port == src_port) {
16441                                 rule_exist = true;
16442                                 break;
16443                         }
16444                 if (rule_exist)
16445                         continue;
16446                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16447                                 sizeof(struct mlx5_sub_policy_color_rule),
16448                                 0, SOCKET_ID_ANY);
16449                 if (!color_rule)
16450                         return rte_flow_error_set(error, ENOMEM,
16451                                 RTE_FLOW_ERROR_TYPE_ACTION,
16452                                 NULL, "No memory to create tag color rule.");
16453                 color_rule->src_port = src_port;
16454                 attr.priority = i;
16455                 next_policy = mlx5_flow_meter_policy_find(dev,
16456                                                 next_fm->policy_id, NULL);
16457                 MLX5_ASSERT(next_policy);
16458                 next_sub_policy = next_policy->sub_policys[domain][0];
16459                 tbl_data = container_of(next_sub_policy->tbl_rsc,
16460                                         struct mlx5_flow_tbl_data_entry, tbl);
16461                 act_cnt = &mtr_policy->act_cnt[i];
16462                 if (mtr_first) {
16463                         acts.dv_actions[0] = next_fm->meter_action;
16464                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
16465                 } else {
16466                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
16467                         acts.dv_actions[1] = next_fm->meter_action;
16468                 }
16469                 acts.dv_actions[2] = tbl_data->jump.action;
16470                 acts.actions_n = 3;
16471                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
16472                         next_fm = NULL;
16473                         goto err_exit;
16474                 }
16475                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16476                                         i, sub_policy, &attr, true, item,
16477                                         &color_rule->matcher, error)) {
16478                         rte_flow_error_set(error, errno,
16479                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16480                                 "Failed to create hierarchy meter matcher.");
16481                         goto err_exit;
16482                 }
16483                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
16484                                         (enum rte_color)i,
16485                                         color_rule->matcher->matcher_object,
16486                                         acts.actions_n, acts.dv_actions,
16487                                         true, item,
16488                                         &color_rule->rule, &attr)) {
16489                         rte_flow_error_set(error, errno,
16490                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16491                                 "Failed to create hierarchy meter rule.");
16492                         goto err_exit;
16493                 }
16494                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16495                                   color_rule, next_port);
16496         }
16497 exit:
16498         /*
16499          * Recurse to iterate all meters in the hierarchy and
16500          * create the needed rules.
16501          */
16502         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
16503                                                 src_port, item, error);
16504 err_exit:
16505         if (color_rule) {
16506                 if (color_rule->rule)
16507                         mlx5_flow_os_destroy_flow(color_rule->rule);
16508                 if (color_rule->matcher) {
16509                         struct mlx5_flow_tbl_data_entry *tbl =
16510                                 container_of(color_rule->matcher->tbl,
16511                                                 typeof(*tbl), tbl);
16512                         mlx5_cache_unregister(&tbl->matchers,
16513                                                 &color_rule->matcher->entry);
16514                 }
16515                 mlx5_free(color_rule);
16516         }
16517         if (next_fm)
16518                 mlx5_flow_meter_detach(priv, next_fm);
16519         return -rte_errno;
16520 }
16521
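/*
 * Note on the dv_actions[] ordering built above: with a valid src_port
 * (matching a specific transfer source) mtr_first is set and the next
 * meter executes before the color modify-header action, i.e.
 * {meter, modify_hdr, jump}; otherwise the order is
 * {modify_hdr, meter, jump}. The jump to the next policy table is
 * always last.
 */
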
16522 /**
16523  * Destroy the sub policy table with RX queue.
16524  *
16525  * @param[in] dev
16526  *   Pointer to Ethernet device.
16527  * @param[in] mtr_policy
16528  *   Pointer to meter policy table.
16529  */
16530 static void
16531 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
16532                 struct mlx5_flow_meter_policy *mtr_policy)
16533 {
16534         struct mlx5_priv *priv = dev->data->dev_private;
16535         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16536         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16537         uint32_t i, j;
16538         uint16_t sub_policy_num, new_policy_num;
16539
16540         rte_spinlock_lock(&mtr_policy->sl);
16541         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16542                 switch (mtr_policy->act_cnt[i].fate_action) {
16543                 case MLX5_FLOW_FATE_SHARED_RSS:
16544                         sub_policy_num = (mtr_policy->sub_policy_num >>
16545                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16546                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16547                         new_policy_num = sub_policy_num;
16548                         for (j = 0; j < sub_policy_num; j++) {
16549                                 sub_policy =
16550                                         mtr_policy->sub_policys[domain][j];
16551                                 if (sub_policy) {
16552                                         __flow_dv_destroy_sub_policy_rules(dev,
16553                                                         sub_policy);
16554                                         if (sub_policy !=
16555                                             mtr_policy->sub_policys[domain][0]) {
16556                                                 mtr_policy->sub_policys[domain][j] =
16557                                                                 NULL;
16558                                                 mlx5_ipool_free
16559                                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16560                                                                 sub_policy->idx);
16561                                                 new_policy_num--;
16562                                         }
16563                                 }
16564                         }
16565                         if (new_policy_num != sub_policy_num) {
16566                                 mtr_policy->sub_policy_num &=
16567                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16568                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16569                                 mtr_policy->sub_policy_num |=
16570                                 (new_policy_num &
16571                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16572                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16573                         }
16574                         break;
16575                 case MLX5_FLOW_FATE_QUEUE:
16576                         sub_policy = mtr_policy->sub_policys[domain][0];
16577                         __flow_dv_destroy_sub_policy_rules(dev,
16578                                                 sub_policy);
16579                         break;
16580                 default:
16581                         /* Other actions have no queue, nothing to do. */
16582                         break;
16583                 }
16584         }
16585         rte_spinlock_unlock(&mtr_policy->sl);
16586 }
16587
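/*
 * Only the ingress domain is walked above: Rx queue and shared RSS fates
 * exist only there, so egress/transfer sub-policies never hold an hrxq
 * reference and need no Rx queue cleanup.
 */
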
16588 /**
16589  * Validate the batch counter support in root table.
16590  *
16591  * Create a simple flow with invalid counter and drop action on root table to
16592  * validate if batch counter with offset on root table is supported or not.
16593  *
16594  * @param[in] dev
16595  *   Pointer to rte_eth_dev structure.
16596  *
16597  * @return
16598  *   0 on success, a negative errno value otherwise and rte_errno is set.
16599  */
16600 int
16601 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
16602 {
16603         struct mlx5_priv *priv = dev->data->dev_private;
16604         struct mlx5_dev_ctx_shared *sh = priv->sh;
16605         struct mlx5_flow_dv_match_params mask = {
16606                 .size = sizeof(mask.buf),
16607         };
16608         struct mlx5_flow_dv_match_params value = {
16609                 .size = sizeof(value.buf),
16610         };
16611         struct mlx5dv_flow_matcher_attr dv_attr = {
16612                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
16613                 .priority = 0,
16614                 .match_criteria_enable = 0,
16615                 .match_mask = (void *)&mask,
16616         };
16617         void *actions[2] = { 0 };
16618         struct mlx5_flow_tbl_resource *tbl = NULL;
16619         struct mlx5_devx_obj *dcs = NULL;
16620         void *matcher = NULL;
16621         void *flow = NULL;
16622         int ret = -1;
16623
16624         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
16625                                         0, 0, 0, NULL);
16626         if (!tbl)
16627                 goto err;
16628         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
16629         if (!dcs)
16630                 goto err;
16631         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
16632                                                     &actions[0]);
16633         if (ret)
16634                 goto err;
16635         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
16636         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
16637                                                &matcher);
16638         if (ret)
16639                 goto err;
16640         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
16641                                        actions, &flow);
16642 err:
16643         /*
16644          * If the batch counter with offset is not supported, the driver does
16645          * not validate the invalid offset value and flow creation succeeds.
16646          * In that case, batch counters are not supported in the root table.
16647          *
16648          * Otherwise, if flow creation fails, the counter offset is supported.
16649          */
16650         if (flow) {
16651                 DRV_LOG(INFO, "Batch counter is not supported in root "
16652                               "table. Switch to fallback mode.");
16653                 rte_errno = ENOTSUP;
16654                 ret = -rte_errno;
16655                 claim_zero(mlx5_flow_os_destroy_flow(flow));
16656         } else {
16657                 /* Check matcher to make sure validate fail at flow create. */
16658                 if (!matcher || errno != EINVAL)
16659                         DRV_LOG(ERR, "Unexpected error in counter offset "
16660                                      "support detection");
16661                 ret = 0;
16662         }
16663         if (actions[0])
16664                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
16665         if (matcher)
16666                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
16667         if (tbl)
16668                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
16669         if (dcs)
16670                 claim_zero(mlx5_devx_cmd_destroy(dcs));
16671         return ret;
16672 }
16673
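/*
 * The probe above is intentionally inverted: the counter action is
 * created with a deliberately invalid offset (UINT16_MAX), so a
 * successful flow creation proves the offset was ignored (no batch
 * counter support on the root table), while a creation failure with
 * EINVAL proves the offset was validated, i.e. the feature is there.
 */
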
16674 /**
16675  * Query a devx counter.
16676  *
16677  * @param[in] dev
16678  *   Pointer to the Ethernet device structure.
16679  * @param[in] counter
16680  *   Index to the flow counter.
16681  * @param[in] clear
16682  *   Set to clear the counter statistics.
16683  * @param[out] pkts
16684  *   The statistics value of packets.
16685  * @param[out] bytes
16686  *   The statistics value of bytes.
16687  *
16688  * @return
16689  *   0 on success, otherwise return -1.
16690  */
16691 static int
16692 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
16693                       uint64_t *pkts, uint64_t *bytes)
16694 {
16695         struct mlx5_priv *priv = dev->data->dev_private;
16696         struct mlx5_flow_counter *cnt;
16697         uint64_t inn_pkts, inn_bytes;
16698         int ret;
16699
16700         if (!priv->config.devx)
16701                 return -1;
16702
16703         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
16704         if (ret)
16705                 return -1;
16706         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
16707         *pkts = inn_pkts - cnt->hits;
16708         *bytes = inn_bytes - cnt->bytes;
16709         if (clear) {
16710                 cnt->hits = inn_pkts;
16711                 cnt->bytes = inn_bytes;
16712         }
16713         return 0;
16714 }
16715
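/*
 * Application-side sketch of reading a flow counter through the generic
 * rte_flow API, assuming a flow created with a COUNT action; the
 * MLX5_FLOW_DOC_EXAMPLES guard is illustrative and never defined. The
 * clear/reset semantics mirror flow_dv_counter_query() above: clearing
 * resets the hits/bytes baseline, so each query returns the delta
 * accumulated since the previous reset.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static int
example_query_flow_count(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_query_count qc = { .reset = 1 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_error error;

	if (rte_flow_query(port_id, flow, &action, &qc, &error))
		return -1;
	/* qc.hits and qc.bytes hold the delta since the last reset. */
	return 0;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */
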
16716 /**
16717  * Get aged-out flows.
16718  *
16719  * @param[in] dev
16720  *   Pointer to the Ethernet device structure.
16721  * @param[in] context
16722  *   The address of an array of pointers to the aged-out flow contexts.
16723  * @param[in] nb_contexts
16724  *   The length of the context array.
16725  * @param[out] error
16726  *   Perform verbose error reporting if not NULL. Initialized in case of
16727  *   error only.
16728  *
16729  * @return
16730  *   The number of contexts retrieved on success, otherwise a negative
16731  *   errno value. If nb_contexts is 0, return the total number of aged
16732  *   contexts. If nb_contexts is not 0, return the number of aged flows
16733  *   reported in the context array.
16735  */
16736 static int
16737 flow_get_aged_flows(struct rte_eth_dev *dev,
16738                     void **context,
16739                     uint32_t nb_contexts,
16740                     struct rte_flow_error *error)
16741 {
16742         struct mlx5_priv *priv = dev->data->dev_private;
16743         struct mlx5_age_info *age_info;
16744         struct mlx5_age_param *age_param;
16745         struct mlx5_flow_counter *counter;
16746         struct mlx5_aso_age_action *act;
16747         int nb_flows = 0;
16748
16749         if (nb_contexts && !context)
16750                 return rte_flow_error_set(error, EINVAL,
16751                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16752                                           NULL, "empty context");
16753         age_info = GET_PORT_AGE_INFO(priv);
16754         rte_spinlock_lock(&age_info->aged_sl);
16755         LIST_FOREACH(act, &age_info->aged_aso, next) {
16756                 nb_flows++;
16757                 if (nb_contexts) {
16758                         context[nb_flows - 1] =
16759                                                 act->age_params.context;
16760                         if (!(--nb_contexts))
16761                                 break;
16762                 }
16763         }
16764         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
16765                 nb_flows++;
16766                 if (nb_contexts) {
16767                         age_param = MLX5_CNT_TO_AGE(counter);
16768                         context[nb_flows - 1] = age_param->context;
16769                         if (!(--nb_contexts))
16770                                 break;
16771                 }
16772         }
16773         rte_spinlock_unlock(&age_info->aged_sl);
16774         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
16775         return nb_flows;
16776 }
16777
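/*
 * Application-side sketch of the usual two-step pattern over the API
 * above (illustrative only, guard never defined): first query the count
 * of aged flows with a NULL array, then fetch their contexts.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static int
example_drain_aged_flows(uint16_t port_id)
{
	struct rte_flow_error error;
	void **contexts;
	int n, ret;

	n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
	if (n <= 0)
		return n;
	contexts = mlx5_malloc(MLX5_MEM_ZERO, n * sizeof(*contexts),
			       0, SOCKET_ID_ANY);
	if (!contexts)
		return -ENOMEM;
	ret = rte_flow_get_aged_flows(port_id, contexts, n, &error);
	/* Each context is the one given when the AGE action was created. */
	mlx5_free(contexts);
	return ret;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */
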
16778 /*
16779  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
16780  */
16781 static uint32_t
16782 flow_dv_counter_allocate(struct rte_eth_dev *dev)
16783 {
16784         return flow_dv_counter_alloc(dev, 0);
16785 }
16786
16787 /**
16788  * Validate indirect action.
16789  * Dispatcher for action type specific validation.
16790  *
16791  * @param[in] dev
16792  *   Pointer to the Ethernet device structure.
16793  * @param[in] conf
16794  *   Indirect action configuration.
16795  * @param[in] action
16796  *   The indirect action object to validate.
16797  * @param[out] error
16798  *   Perform verbose error reporting if not NULL. Initialized in case of
16799  *   error only.
16800  *
16801  * @return
16802  *   0 on success, otherwise negative errno value.
16803  */
16804 static int
16805 flow_dv_action_validate(struct rte_eth_dev *dev,
16806                         const struct rte_flow_indir_action_conf *conf,
16807                         const struct rte_flow_action *action,
16808                         struct rte_flow_error *err)
16809 {
16810         struct mlx5_priv *priv = dev->data->dev_private;
16811
16812         RTE_SET_USED(conf);
16813         switch (action->type) {
16814         case RTE_FLOW_ACTION_TYPE_RSS:
16815                 /*
16816                  * priv->obj_ops is set according to driver capabilities.
16817                  * When DevX capabilities are
16818                  * sufficient, it is set to devx_obj_ops.
16819                  * Otherwise, it is set to ibv_obj_ops.
16820                  * ibv_obj_ops doesn't support ind_table_modify operation.
16821                  * In this case the indirect RSS action can't be used.
16822                  */
16823                 if (priv->obj_ops.ind_table_modify == NULL)
16824                         return rte_flow_error_set
16825                                         (err, ENOTSUP,
16826                                          RTE_FLOW_ERROR_TYPE_ACTION,
16827                                          NULL,
16828                                          "Indirect RSS action not supported");
16829                 return mlx5_validate_action_rss(dev, action, err);
16830         case RTE_FLOW_ACTION_TYPE_AGE:
16831                 if (!priv->sh->aso_age_mng)
16832                         return rte_flow_error_set(err, ENOTSUP,
16833                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16834                                                 NULL,
16835                                                 "Indirect age action not supported");
16836                 return flow_dv_validate_action_age(0, action, dev, err);
16837         case RTE_FLOW_ACTION_TYPE_COUNT:
16838                 /*
16839                  * There are two mechanisms to share the action count.
16840                  * The old mechanism uses the shared field to share, while the
16841                  * new mechanism uses the indirect action API.
16842                  * This validation comes to make sure that the two mechanisms
16843                  * are not combined.
16844                  */
16845                 if (is_shared_action_count(action))
16846                         return rte_flow_error_set(err, ENOTSUP,
16847                                                   RTE_FLOW_ERROR_TYPE_ACTION,
16848                                                   NULL,
16849                                                   "Mix shared and indirect counter is not supported");
16850                 return flow_dv_validate_action_count(dev, true, 0, err);
16851         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
16852                 if (!priv->sh->ct_aso_en)
16853                         return rte_flow_error_set(err, ENOTSUP,
16854                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16855                                         "ASO CT is not supported");
16856                 return mlx5_validate_action_ct(dev, action->conf, err);
16857         default:
16858                 return rte_flow_error_set(err, ENOTSUP,
16859                                           RTE_FLOW_ERROR_TYPE_ACTION,
16860                                           NULL,
16861                                           "action type not supported");
16862         }
16863 }
16864
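/*
 * Application-side sketch (illustrative only, guard never defined) of
 * creating an indirect COUNT action; flow_dv_action_validate() above is
 * the dispatcher that gates which indirect types are accepted
 * (RSS, AGE, COUNT, CONNTRACK).
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static struct rte_flow_action_handle *
example_indirect_count(uint16_t port_id)
{
	const struct rte_flow_indir_action_conf conf = {
		.ingress = 1,
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_error error;

	return rte_flow_action_handle_create(port_id, &conf, &action, &error);
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */
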
16865 /**
16866  * Validate the meter hierarchy chain for meter policy.
16867  *
16868  * @param[in] dev
16869  *   Pointer to the Ethernet device structure.
16870  * @param[in] meter_id
16871  *   Meter id.
16872  * @param[in] action_flags
16873  *   Holds the actions detected until now.
16874  * @param[out] is_rss
16875  *   Is RSS or not.
16876  * @param[out] hierarchy_domain
16877  *   The domain bitmap for hierarchy policy.
16878  * @param[out] error
16879  *   Perform verbose error reporting if not NULL. Initialized in case of
16880  *   error only.
16881  *
16882  * @return
16883  *   0 on success, otherwise negative errno value with error set.
16884  */
16885 static int
16886 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
16887                                   uint32_t meter_id,
16888                                   uint64_t action_flags,
16889                                   bool *is_rss,
16890                                   uint8_t *hierarchy_domain,
16891                                   struct rte_mtr_error *error)
16892 {
16893         struct mlx5_priv *priv = dev->data->dev_private;
16894         struct mlx5_flow_meter_info *fm;
16895         struct mlx5_flow_meter_policy *policy;
16896         uint8_t cnt = 1;
16897
16898         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
16899                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
16900                 return -rte_mtr_error_set(error, EINVAL,
16901                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
16902                                         NULL,
16903                                         "Multiple fate actions not supported.");
16904         while (true) {
16905                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
16906                 if (!fm)
16907                         return -rte_mtr_error_set(error, EINVAL,
16908                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
16909                                         "Meter not found in meter hierarchy.");
16910                 if (fm->def_policy)
16911                         return -rte_mtr_error_set(error, EINVAL,
16912                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
16913                         "Non-termination meter not supported in hierarchy.");
16914                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
16915                 MLX5_ASSERT(policy);
16916                 if (!policy->is_hierarchy) {
16917                         if (policy->transfer)
16918                                 *hierarchy_domain |=
16919                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
16920                         if (policy->ingress)
16921                                 *hierarchy_domain |=
16922                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
16923                         if (policy->egress)
16924                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
16925                         *is_rss = policy->is_rss;
16926                         break;
16927                 }
16928                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
16929                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
16930                         return -rte_mtr_error_set(error, EINVAL,
16931                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
16932                                         "Exceed max hierarchy meter number.");
16933         }
16934         return 0;
16935 }
16936
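/*
 * Example of a chain the walk above accepts: meter M0 whose policy is
 * GREEN -> meter M1, where M1 carries a terminating, non-default policy
 * such as GREEN -> QUEUE. The domains and RSS flag of the final policy
 * propagate back through hierarchy_domain/is_rss; chains longer than
 * MLX5_MTR_CHAIN_MAX_NUM or ending in a default policy are rejected.
 */
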
16937 /**
16938  * Validate meter policy actions.
16939  * Dispatcher for action type specific validation.
16940  *
16941  * @param[in] dev
16942  *   Pointer to the Ethernet device structure.
16943  * @param[in] actions
16944  *   Array of per-color meter policy actions to validate.
16945  * @param[in] attr
16946  *   Attributes of flow to determine steering domain.
16947  * @param[out] error
16948  *   Perform verbose error reporting if not NULL. Initialized in case of
16949  *   error only.
16950  *
16951  * @return
16952  *   0 on success, otherwise negative errno value.
16953  */
16954 static int
16955 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
16956                         const struct rte_flow_action *actions[RTE_COLORS],
16957                         struct rte_flow_attr *attr,
16958                         bool *is_rss,
16959                         uint8_t *domain_bitmap,
16960                         bool *is_def_policy,
16961                         struct rte_mtr_error *error)
16962 {
16963         struct mlx5_priv *priv = dev->data->dev_private;
16964         struct mlx5_dev_config *dev_conf = &priv->config;
16965         const struct rte_flow_action *act;
16966         uint64_t action_flags = 0;
16967         int actions_n;
16968         int i, ret;
16969         struct rte_flow_error flow_err;
16970         uint8_t domain_color[RTE_COLORS] = {0};
16971         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
16972         uint8_t hierarchy_domain = 0;
16973         const struct rte_flow_action_meter *mtr;
16974
16975         if (!priv->config.dv_esw_en)
16976                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
16977         *domain_bitmap = def_domain;
16978         if (actions[RTE_COLOR_YELLOW] &&
16979                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
16980                 return -rte_mtr_error_set(error, ENOTSUP,
16981                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16982                                 NULL,
16983                                 "Yellow color does not support any action.");
16984         if (actions[RTE_COLOR_RED] &&
16985                 actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
16986                 return -rte_mtr_error_set(error, ENOTSUP,
16987                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16988                                 NULL, "Red color only supports drop action.");
16989         /*
16990          * Check default policy actions:
16991          * Green/Yellow: no action, Red: drop action
16992          */
16993         if ((!actions[RTE_COLOR_GREEN] ||
16994                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
16995                 *is_def_policy = true;
16996                 return 0;
16997         }
16998         flow_err.message = NULL;
16999         for (i = 0; i < RTE_COLORS; i++) {
17000                 act = actions[i];
17001                 for (action_flags = 0, actions_n = 0;
17002                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
17003                         act++) {
17004                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17005                                 return -rte_mtr_error_set(error, ENOTSUP,
17006                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17007                                           NULL, "too many actions");
17008                         switch (act->type) {
17009                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17010                                 if (!priv->config.dv_esw_en)
17011                                         return -rte_mtr_error_set(error,
17012                                         ENOTSUP,
17013                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17014                                         NULL, "PORT action validation"
17015                                         " fails when E-Switch is disabled");
17016                                 ret = flow_dv_validate_action_port_id(dev,
17017                                                 action_flags,
17018                                                 act, attr, &flow_err);
17019                                 if (ret)
17020                                         return -rte_mtr_error_set(error,
17021                                         ENOTSUP,
17022                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17023                                         NULL, flow_err.message ?
17024                                         flow_err.message :
17025                                         "PORT action validate check fail");
17026                                 ++actions_n;
17027                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
17028                                 break;
17029                         case RTE_FLOW_ACTION_TYPE_MARK:
17030                                 ret = flow_dv_validate_action_mark(dev, act,
17031                                                            action_flags,
17032                                                            attr, &flow_err);
17033                                 if (ret < 0)
17034                                         return -rte_mtr_error_set(error,
17035                                         ENOTSUP,
17036                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17037                                         NULL, flow_err.message ?
17038                                         flow_err.message :
17039                                         "Mark action validate check fail");
17040                                 if (dev_conf->dv_xmeta_en !=
17041                                         MLX5_XMETA_MODE_LEGACY)
17042                                         return -rte_mtr_error_set(error,
17043                                         ENOTSUP,
17044                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17045                                         NULL, "Extend MARK action is "
17046                                         "not supported. Please try the "
17047                                         "default policy for the meter.");
17048                                 action_flags |= MLX5_FLOW_ACTION_MARK;
17049                                 ++actions_n;
17050                                 break;
17051                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17052                                 ret = flow_dv_validate_action_set_tag(dev,
17053                                                         act, action_flags,
17054                                                         attr, &flow_err);
17055                                 if (ret)
17056                                         return -rte_mtr_error_set(error,
17057                                         ENOTSUP,
17058                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17059                                         NULL, flow_err.message ?
17060                                         flow_err.message :
17061                                         "Set tag action validate check fail");
17062                                 /*
17063                                  * Count all modify-header actions
17064                                  * as one action.
17065                                  */
17066                                 if (!(action_flags &
17067                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
17068                                         ++actions_n;
17069                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
17070                                 break;
17071                         case RTE_FLOW_ACTION_TYPE_DROP:
17072                                 ret = mlx5_flow_validate_action_drop
17073                                         (action_flags,
17074                                         attr, &flow_err);
17075                                 if (ret < 0)
17076                                         return -rte_mtr_error_set(error,
17077                                         ENOTSUP,
17078                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17079                                         NULL, flow_err.message ?
17080                                         flow_err.message :
17081                                         "Drop action validate check fail");
17082                                 action_flags |= MLX5_FLOW_ACTION_DROP;
17083                                 ++actions_n;
17084                                 break;
17085                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17086                                 /*
17087                                  * Check whether extensive
17088                                  * metadata feature is engaged.
17089                                  */
17090                                 if (dev_conf->dv_flow_en &&
17091                                         (dev_conf->dv_xmeta_en !=
17092                                         MLX5_XMETA_MODE_LEGACY) &&
17093                                         mlx5_flow_ext_mreg_supported(dev))
17094                                         return -rte_mtr_error_set(error,
17095                                           ENOTSUP,
17096                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17097                                           NULL, "Queue action with meta "
17098                                           "is not supported. Please try the "
17099                                           "default policy for the meter.");
17100                                 ret = mlx5_flow_validate_action_queue(act,
17101                                                         action_flags, dev,
17102                                                         attr, &flow_err);
17103                                 if (ret < 0)
17104                                         return -rte_mtr_error_set(error,
17105                                           ENOTSUP,
17106                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17107                                           NULL, flow_err.message ?
17108                                           flow_err.message :
17109                                           "Queue action validate check fail");
17110                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
17111                                 ++actions_n;
17112                                 break;
17113                         case RTE_FLOW_ACTION_TYPE_RSS:
17114                                 if (dev_conf->dv_flow_en &&
17115                                         (dev_conf->dv_xmeta_en !=
17116                                         MLX5_XMETA_MODE_LEGACY) &&
17117                                         mlx5_flow_ext_mreg_supported(dev))
17118                                         return -rte_mtr_error_set(error,
17119                                           ENOTSUP,
17120                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17121                                           NULL, "RSS action with meta "
17122                                           "is not supported. Please try the "
17123                                           "default policy for the meter.");
17124                                 ret = mlx5_validate_action_rss(dev, act,
17125                                                 &flow_err);
17126                                 if (ret < 0)
17127                                         return -rte_mtr_error_set(error,
17128                                           ENOTSUP,
17129                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17130                                           NULL, flow_err.message ?
17131                                           flow_err.message :
17132                                           "RSS action validate check fail");
17133                                 action_flags |= MLX5_FLOW_ACTION_RSS;
17134                                 ++actions_n;
17135                                 *is_rss = true;
17136                                 break;
17137                         case RTE_FLOW_ACTION_TYPE_JUMP:
17138                                 ret = flow_dv_validate_action_jump(dev,
17139                                         NULL, act, action_flags,
17140                                         attr, true, &flow_err);
17141                                 if (ret)
17142                                         return -rte_mtr_error_set(error,
17143                                           ENOTSUP,
17144                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17145                                           NULL, flow_err.message ?
17146                                           flow_err.message :
17147                                           "Jump action validate check fail");
17148                                 ++actions_n;
17149                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
17150                                 break;
17151                         case RTE_FLOW_ACTION_TYPE_METER:
17152                                 if (i != RTE_COLOR_GREEN)
17153                                         return -rte_mtr_error_set(error,
17154                                                 ENOTSUP,
17155                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17156                                                 NULL, flow_err.message ?
17157                                                 flow_err.message :
17158                                   "Meter hierarchy only supports GREEN color.");
17159                                 mtr = act->conf;
17160                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
17161                                                         mtr->mtr_id,
17162                                                         action_flags,
17163                                                         is_rss,
17164                                                         &hierarchy_domain,
17165                                                         error);
17166                                 if (ret)
17167                                         return ret;
17168                                 ++actions_n;
17169                                 action_flags |=
17170                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
17171                                 break;
17172                         default:
17173                                 return -rte_mtr_error_set(error, ENOTSUP,
17174                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17175                                         NULL,
17176                                         "Unsupported action in meter policy.");
17177                         }
17178                 }
17179                 /* Yellow is not supported, just skip. */
17180                 if (i == RTE_COLOR_YELLOW)
17181                         continue;
17182                 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
17183                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
17184                 else if ((action_flags &
17185                         (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
17186                         (action_flags & MLX5_FLOW_ACTION_MARK))
17187                         /*
17188                          * Only support MLX5_XMETA_MODE_LEGACY
17189                          * so MARK action only in ingress domain.
17190                          */
17191                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
17192                 else if (action_flags &
17193                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
17194                         domain_color[i] = hierarchy_domain;
17195                 else
17196                         domain_color[i] = def_domain;
17197                 /*
17198                  * Validate the drop action mutual exclusion
17199                  * with other actions. Drop action is mutually-exclusive
17200                  * with any other action, except for Count action.
17201                  */
17202                 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
17203                         (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
17204                         return -rte_mtr_error_set(error, ENOTSUP,
17205                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17206                                 NULL, "Drop action is mutually-exclusive "
17207                                 "with any other action");
17208                 }
17209                 /* E-Switch has a few restrictions on items and actions. */
17210                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
17211                         if (!mlx5_flow_ext_mreg_supported(dev) &&
17212                                 action_flags & MLX5_FLOW_ACTION_MARK)
17213                                 return -rte_mtr_error_set(error, ENOTSUP,
17214                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17215                                         NULL, "unsupported action MARK");
17216                         if (action_flags & MLX5_FLOW_ACTION_QUEUE)
17217                                 return -rte_mtr_error_set(error, ENOTSUP,
17218                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17219                                         NULL, "unsupported action QUEUE");
17220                         if (action_flags & MLX5_FLOW_ACTION_RSS)
17221                                 return -rte_mtr_error_set(error, ENOTSUP,
17222                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17223                                         NULL, "unsupported action RSS");
17224                         if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17225                                 return -rte_mtr_error_set(error, ENOTSUP,
17226                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17227                                         NULL, "no fate action is found");
17228                 } else {
17229                         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
17230                                 (domain_color[i] &
17231                                 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
17232                                 if ((domain_color[i] &
17233                                         MLX5_MTR_DOMAIN_EGRESS_BIT))
17234                                         domain_color[i] =
17235                                         MLX5_MTR_DOMAIN_EGRESS_BIT;
17236                                 else
17237                                         return -rte_mtr_error_set(error,
17238                                         ENOTSUP,
17239                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17240                                         NULL, "no fate action is found");
17241                         }
17242                 }
17243                 if (domain_color[i] != def_domain)
17244                         *domain_bitmap = domain_color[i];
17245         }
17246         return 0;
17247 }
17248
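/*
 * Application-side sketch (illustrative only, guard never defined) of a
 * policy the validator above accepts: GREEN -> QUEUE, YELLOW -> no
 * action, RED -> DROP, registered through the generic rte_mtr API.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static int
example_add_mtr_policy(uint16_t port_id, uint32_t policy_id)
{
	static const struct rte_flow_action_queue green_queue = { .index = 0 };
	static const struct rte_flow_action green_acts[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &green_queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	static const struct rte_flow_action red_acts[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_mtr_meter_policy_params params = {
		.actions = {
			[RTE_COLOR_GREEN] = green_acts,
			[RTE_COLOR_YELLOW] = NULL,
			[RTE_COLOR_RED] = red_acts,
		},
	};
	struct rte_mtr_error error;

	return rte_mtr_meter_policy_add(port_id, policy_id, &params, &error);
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */
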
17249 static int
17250 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
17251 {
17252         struct mlx5_priv *priv = dev->data->dev_private;
17253         int ret = 0;
17254
17255         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
17256                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
17257                                                 flags);
17258                 if (ret != 0)
17259                         return ret;
17260         }
17261         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
17262                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
17263                 if (ret != 0)
17264                         return ret;
17265         }
17266         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
17267                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
17268                 if (ret != 0)
17269                         return ret;
17270         }
17271         return 0;
17272 }
17273
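/*
 * Application-side sketch (illustrative only, guard never defined): the
 * PMD-specific flow sync API is the public entry point that reaches
 * flow_dv_sync_domain() above.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static int
example_sync_rx_domain(uint16_t port_id)
{
	/* Flush pending steering writes for the NIC Rx domain. */
	return rte_pmd_mlx5_sync_flow(port_id, MLX5_DOMAIN_BIT_NIC_RX);
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */
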
17274 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
17275         .validate = flow_dv_validate,
17276         .prepare = flow_dv_prepare,
17277         .translate = flow_dv_translate,
17278         .apply = flow_dv_apply,
17279         .remove = flow_dv_remove,
17280         .destroy = flow_dv_destroy,
17281         .query = flow_dv_query,
17282         .create_mtr_tbls = flow_dv_create_mtr_tbls,
17283         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
17284         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
17285         .create_meter = flow_dv_mtr_alloc,
17286         .free_meter = flow_dv_aso_mtr_release_to_pool,
17287         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
17288         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
17289         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
17290         .create_policy_rules = flow_dv_create_policy_rules,
17291         .destroy_policy_rules = flow_dv_destroy_policy_rules,
17292         .create_def_policy = flow_dv_create_def_policy,
17293         .destroy_def_policy = flow_dv_destroy_def_policy,
17294         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
17295         .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
17296         .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
17297         .counter_alloc = flow_dv_counter_allocate,
17298         .counter_free = flow_dv_counter_free,
17299         .counter_query = flow_dv_counter_query,
17300         .get_aged_flows = flow_get_aged_flows,
17301         .action_validate = flow_dv_action_validate,
17302         .action_create = flow_dv_action_create,
17303         .action_destroy = flow_dv_action_destroy,
17304         .action_update = flow_dv_action_update,
17305         .action_query = flow_dv_action_query,
17306         .sync_domain = flow_dv_sync_domain,
17307 };
17308
17309 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
17310