net/mlx5: move header modify allocator to ipool
drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};
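
/*
 * Example: for an ETH / IPV4 / UDP pattern flow_dv_attr_init() below sets
 * valid = 1, ipv4 = 1 and udp = 1; the "attr" union view allows clearing
 * all flags at once when a tunnel header is crossed.
 */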

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layers except in tunnel mode.
 * In tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is a suffix flow
         * and the layer flags were set by the prefix flow. Use the layer
         * flags from the prefix flow, since the suffix flow may not carry
         * the user-defined items after the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static const struct rte_flow_item *
mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                default:
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                        return item;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                            item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
                                return item;
                        break;
                }
        }
        return NULL;
}

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
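
/*
 * Example: flow_dv_fetch_field((const uint8_t []){0x12, 0x34, 0x56}, 3)
 * returns 0x123456 on both little- and big-endian hosts.
 */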

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t carry_b = 0;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                uint32_t size_b;
                uint32_t off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
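                /*
                 * Example: mask == 0x00fff000 with carry_b == 0 gives
                 * off_b == 12 and size_b == 32 - 12 - 8 == 12 bits.
                 */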
                MLX5_ASSERT(size_b);
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        carry_b = 0;
                        if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
                            dcopy->size != 0) {
                                actions[i].length =
                                        dcopy->size * CHAR_BIT - dcopy->offset;
                                carry_b = actions[i].length;
                                next_field = false;
                        }
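                        /*
                         * Example: copying a 32-bit source into a 16-bit
                         * destination writes 16 bits, sets carry_b to 16
                         * and keeps the same source field for the next
                         * command.
                         */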
                        /*
                         * Not enough bits in a source field to fill a
                         * destination field. Switch to the next source.
                         */
                        if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
                            (size_b == field->size * CHAR_BIT - off_b)) {
                                actions[i].length =
                                        field->size * CHAR_BIT - off_b;
                                dcopy->offset += actions[i].length;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
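
/*
 * Example: a SET of the UDP destination port (mask 0xffff at byte offset 2
 * in modify_udp) yields off_b == 0 and size_b == 16, producing a single
 * command for MLX5_MODI_OUT_UDP_DPORT with offset 0 and length 16.
 */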

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
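        /*
         * Note: adding 0xFF to the 8-bit TTL/hop-limit field with
         * MLX5_MODIFICATION_TYPE_ADD wraps around and effectively
         * decrements the field by one (mod 256).
         */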
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate subtracting X from Y we add UINT32_MAX
                 * X times: (Y + X * UINT32_MAX) mod 2^32 == Y - X,
                 * so each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate subtracting X from Y we add UINT32_MAX
                 * X times: (Y + X * UINT32_MAX) mod 2^32 == Y - X,
                 * so each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
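                /*
                 * Example: with dv_regc0_mask == 0xffff0000 only the upper
                 * 16 bits of reg_c[0] are available, so a copy to REG_C_0
                 * starts at bit offset rte_bsf32(0xffff0000) == 16.
                 */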
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because there is
                         * no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

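                /*
                 * Example: with msk_c0 == 0xffff0000, shl_c0 == 16: the
                 * mark value is shifted into the upper half of reg_c[0]
                 * and the mask is narrowed accordingly.
                 */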
                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
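        /* RTE_IPV4_HDR_DSCP_MASK >> 2 == 0x3F: the six DSCP bits right-aligned. */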
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1362         /*
1363          * Even though the DSCP bits of IPv6 are not byte aligned,
1364          * rdma-core only accepts a byte-aligned DSCP value occupying
1365          * bits 0 to 5, to be compatible with IPv4. Hence there is no
1366          * need to shift the bits in the IPv6 case.
1367          */
1368         ipv6.hdr.vtc_flow = conf->dscp;
1369         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
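             /*
              * RTE_IPV6_HDR_DSCP_MASK is 0x0fc00000 (DSCP sits in bits
              * 27..22 of vtc_flow); shifting right by 22 yields the same
              * byte-aligned 6-bit mask 0x3f as in the IPv4 case.
              */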
1370         item.spec = &ipv6;
1371         item.mask = &ipv6_mask;
1372         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1373                                              MLX5_MODIFICATION_TYPE_SET, error);
1374 }
1375
1376 static int
1377 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1378                            enum rte_flow_field_id field)
1379 {
1380         switch (field) {
1381         case RTE_FLOW_FIELD_START:
1382                 return 32;
1383         case RTE_FLOW_FIELD_MAC_DST:
1384         case RTE_FLOW_FIELD_MAC_SRC:
1385                 return 48;
1386         case RTE_FLOW_FIELD_VLAN_TYPE:
1387                 return 16;
1388         case RTE_FLOW_FIELD_VLAN_ID:
1389                 return 12;
1390         case RTE_FLOW_FIELD_MAC_TYPE:
1391                 return 16;
1392         case RTE_FLOW_FIELD_IPV4_DSCP:
1393                 return 6;
1394         case RTE_FLOW_FIELD_IPV4_TTL:
1395                 return 8;
1396         case RTE_FLOW_FIELD_IPV4_SRC:
1397         case RTE_FLOW_FIELD_IPV4_DST:
1398                 return 32;
1399         case RTE_FLOW_FIELD_IPV6_DSCP:
1400                 return 6;
1401         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1402                 return 8;
1403         case RTE_FLOW_FIELD_IPV6_SRC:
1404         case RTE_FLOW_FIELD_IPV6_DST:
1405                 return 128;
1406         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1407         case RTE_FLOW_FIELD_TCP_PORT_DST:
1408                 return 16;
1409         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1410         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1411                 return 32;
1412         case RTE_FLOW_FIELD_TCP_FLAGS:
1413                 return 9;
1414         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1415         case RTE_FLOW_FIELD_UDP_PORT_DST:
1416                 return 16;
1417         case RTE_FLOW_FIELD_VXLAN_VNI:
1418         case RTE_FLOW_FIELD_GENEVE_VNI:
1419                 return 24;
1420         case RTE_FLOW_FIELD_GTP_TEID:
1421         case RTE_FLOW_FIELD_TAG:
1422                 return 32;
1423         case RTE_FLOW_FIELD_MARK:
1424                 return 24;
1425         case RTE_FLOW_FIELD_META:
1426                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1427                         return 16;
1428                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1429                         return 32;
1430                 else
1431                         return 0;
1432         case RTE_FLOW_FIELD_POINTER:
1433         case RTE_FLOW_FIELD_VALUE:
1434                 return 64;
1435         default:
1436                 MLX5_ASSERT(false);
1437         }
1438         return 0;
1439 }
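     /*
      * Illustrative usage: per the switch above,
      * mlx5_flow_item_field_width(config, RTE_FLOW_FIELD_VLAN_ID) returns 12
      * and mlx5_flow_item_field_width(config, RTE_FLOW_FIELD_IPV6_SRC)
      * returns 128, while the width of RTE_FLOW_FIELD_META depends on the
      * dv_xmeta_en configuration.
      */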
1440
1441 static void
1442 mlx5_flow_field_id_to_modify_info
1443                 (const struct rte_flow_action_modify_data *data,
1444                  struct field_modify_info *info,
1445                  uint32_t *mask, uint32_t *value,
1446                  uint32_t width, uint32_t dst_width,
1447                  struct rte_eth_dev *dev,
1448                  const struct rte_flow_attr *attr,
1449                  struct rte_flow_error *error)
1450 {
1451         struct mlx5_priv *priv = dev->data->dev_private;
1452         struct mlx5_dev_config *config = &priv->config;
1453         uint32_t idx = 0;
1454         uint32_t off = 0;
1455         uint64_t val = 0;
1456         switch (data->field) {
1457         case RTE_FLOW_FIELD_START:
1458                 /* not supported yet */
1459                 MLX5_ASSERT(false);
1460                 break;
1461         case RTE_FLOW_FIELD_MAC_DST:
1462                 off = data->offset > 16 ? data->offset - 16 : 0;
1463                 if (mask) {
1464                         if (data->offset < 16) {
1465                                 info[idx] = (struct field_modify_info){2, 0,
1466                                                 MLX5_MODI_OUT_DMAC_15_0};
1467                                 if (width < 16) {
1468                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1469                                                                  (16 - width));
1470                                         width = 0;
1471                                 } else {
1472                                         mask[idx] = RTE_BE16(0xffff);
1473                                         width -= 16;
1474                                 }
1475                                 if (!width)
1476                                         break;
1477                                 ++idx;
1478                         }
1479                         info[idx] = (struct field_modify_info){4, 4 * idx,
1480                                                 MLX5_MODI_OUT_DMAC_47_16};
1481                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1482                                                       (32 - width)) << off);
1483                 } else {
1484                         if (data->offset < 16)
1485                                 info[idx++] = (struct field_modify_info){2, 0,
1486                                                 MLX5_MODI_OUT_DMAC_15_0};
1487                         info[idx] = (struct field_modify_info){4, off,
1488                                                 MLX5_MODI_OUT_DMAC_47_16};
1489                 }
1490                 break;
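                     /*
                      * Worked example for the MAC_DST case above
                      * (illustrative values): offset 0 and width 48 emit
                      * two entries,
                      *   info[0] = {2, 0, MLX5_MODI_OUT_DMAC_15_0},
                      *             mask[0] = RTE_BE16(0xffff),
                      *   info[1] = {4, 4, MLX5_MODI_OUT_DMAC_47_16},
                      *             mask[1] = RTE_BE32(0xffffffff),
                      * matching the 16/32-bit split of the destination MAC
                      * in the modify-header interface.
                      */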
1491         case RTE_FLOW_FIELD_MAC_SRC:
1492                 off = data->offset > 16 ? data->offset - 16 : 0;
1493                 if (mask) {
1494                         if (data->offset < 16) {
1495                                 info[idx] = (struct field_modify_info){2, 0,
1496                                                 MLX5_MODI_OUT_SMAC_15_0};
1497                                 if (width < 16) {
1498                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1499                                                                  (16 - width));
1500                                         width = 0;
1501                                 } else {
1502                                         mask[idx] = RTE_BE16(0xffff);
1503                                         width -= 16;
1504                                 }
1505                                 if (!width)
1506                                         break;
1507                                 ++idx;
1508                         }
1509                         info[idx] = (struct field_modify_info){4, 4 * idx,
1510                                                 MLX5_MODI_OUT_SMAC_47_16};
1511                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1512                                                       (32 - width)) << off);
1513                 } else {
1514                         if (data->offset < 16)
1515                                 info[idx++] = (struct field_modify_info){2, 0,
1516                                                 MLX5_MODI_OUT_SMAC_15_0};
1517                         info[idx] = (struct field_modify_info){4, off,
1518                                                 MLX5_MODI_OUT_SMAC_47_16};
1519                 }
1520                 break;
1521         case RTE_FLOW_FIELD_VLAN_TYPE:
1522                 /* not supported yet */
1523                 break;
1524         case RTE_FLOW_FIELD_VLAN_ID:
1525                 info[idx] = (struct field_modify_info){2, 0,
1526                                         MLX5_MODI_OUT_FIRST_VID};
1527                 if (mask)
1528                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1529                 break;
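                     /*
                      * The "field_mask >> (field_bits - width)" idiom used
                      * above and in the following cases keeps only the
                      * `width` least significant bits; e.g. (illustrative)
                      * a width of 4 gives 0x0fff >> 8 == 0x000f for the
                      * VLAN ID.
                      */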
1530         case RTE_FLOW_FIELD_MAC_TYPE:
1531                 info[idx] = (struct field_modify_info){2, 0,
1532                                         MLX5_MODI_OUT_ETHERTYPE};
1533                 if (mask)
1534                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1535                 break;
1536         case RTE_FLOW_FIELD_IPV4_DSCP:
1537                 info[idx] = (struct field_modify_info){1, 0,
1538                                         MLX5_MODI_OUT_IP_DSCP};
1539                 if (mask)
1540                         mask[idx] = 0x3f >> (6 - width);
1541                 break;
1542         case RTE_FLOW_FIELD_IPV4_TTL:
1543                 info[idx] = (struct field_modify_info){1, 0,
1544                                         MLX5_MODI_OUT_IPV4_TTL};
1545                 if (mask)
1546                         mask[idx] = 0xff >> (8 - width);
1547                 break;
1548         case RTE_FLOW_FIELD_IPV4_SRC:
1549                 info[idx] = (struct field_modify_info){4, 0,
1550                                         MLX5_MODI_OUT_SIPV4};
1551                 if (mask)
1552                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1553                                                      (32 - width));
1554                 break;
1555         case RTE_FLOW_FIELD_IPV4_DST:
1556                 info[idx] = (struct field_modify_info){4, 0,
1557                                         MLX5_MODI_OUT_DIPV4};
1558                 if (mask)
1559                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1560                                                      (32 - width));
1561                 break;
1562         case RTE_FLOW_FIELD_IPV6_DSCP:
1563                 info[idx] = (struct field_modify_info){1, 0,
1564                                         MLX5_MODI_OUT_IP_DSCP};
1565                 if (mask)
1566                         mask[idx] = 0x3f >> (6 - width);
1567                 break;
1568         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1569                 info[idx] = (struct field_modify_info){1, 0,
1570                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1571                 if (mask)
1572                         mask[idx] = 0xff >> (8 - width);
1573                 break;
1574         case RTE_FLOW_FIELD_IPV6_SRC:
1575                 if (mask) {
1576                         if (data->offset < 32) {
1577                                 info[idx] = (struct field_modify_info){4,
1578                                                 4 * idx,
1579                                                 MLX5_MODI_OUT_SIPV6_31_0};
1580                                 if (width < 32) {
1581                                         mask[idx] =
1582                                                 rte_cpu_to_be_32(0xffffffff >>
1583                                                                  (32 - width));
1584                                         width = 0;
1585                                 } else {
1586                                         mask[idx] = RTE_BE32(0xffffffff);
1587                                         width -= 32;
1588                                 }
1589                                 if (!width)
1590                                         break;
1591                                 ++idx;
1592                         }
1593                         if (data->offset < 64) {
1594                                 info[idx] = (struct field_modify_info){4,
1595                                                 4 * idx,
1596                                                 MLX5_MODI_OUT_SIPV6_63_32};
1597                                 if (width < 32) {
1598                                         mask[idx] =
1599                                                 rte_cpu_to_be_32(0xffffffff >>
1600                                                                  (32 - width));
1601                                         width = 0;
1602                                 } else {
1603                                         mask[idx] = RTE_BE32(0xffffffff);
1604                                         width -= 32;
1605                                 }
1606                                 if (!width)
1607                                         break;
1608                                 ++idx;
1609                         }
1610                         if (data->offset < 96) {
1611                                 info[idx] = (struct field_modify_info){4,
1612                                                 4 * idx,
1613                                                 MLX5_MODI_OUT_SIPV6_95_64};
1614                                 if (width < 32) {
1615                                         mask[idx] =
1616                                                 rte_cpu_to_be_32(0xffffffff >>
1617                                                                  (32 - width));
1618                                         width = 0;
1619                                 } else {
1620                                         mask[idx] = RTE_BE32(0xffffffff);
1621                                         width -= 32;
1622                                 }
1623                                 if (!width)
1624                                         break;
1625                                 ++idx;
1626                         }
1627                         info[idx] = (struct field_modify_info){4, 4 * idx,
1628                                                 MLX5_MODI_OUT_SIPV6_127_96};
1629                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1630                                                      (32 - width));
1631                 } else {
1632                         if (data->offset < 32)
1633                                 info[idx++] = (struct field_modify_info){4, 0,
1634                                                 MLX5_MODI_OUT_SIPV6_31_0};
1635                         if (data->offset < 64)
1636                                 info[idx++] = (struct field_modify_info){4, 0,
1637                                                 MLX5_MODI_OUT_SIPV6_63_32};
1638                         if (data->offset < 96)
1639                                 info[idx++] = (struct field_modify_info){4, 0,
1640                                                 MLX5_MODI_OUT_SIPV6_95_64};
1641                         if (data->offset < 128)
1642                                 info[idx++] = (struct field_modify_info){4, 0,
1643                                                 MLX5_MODI_OUT_SIPV6_127_96};
1644                 }
1645                 break;
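                     /*
                      * Worked example for the IPV6_SRC case above
                      * (illustrative): offset 0 and width 128 produce four
                      * 32-bit entries, MLX5_MODI_OUT_SIPV6_31_0 through
                      * MLX5_MODI_OUT_SIPV6_127_96, at offsets 0/4/8/12,
                      * each with a full 0xffffffff mask.
                      */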
1646         case RTE_FLOW_FIELD_IPV6_DST:
1647                 if (mask) {
1648                         if (data->offset < 32) {
1649                                 info[idx] = (struct field_modify_info){4,
1650                                                 4 * idx,
1651                                                 MLX5_MODI_OUT_DIPV6_31_0};
1652                                 if (width < 32) {
1653                                         mask[idx] =
1654                                                 rte_cpu_to_be_32(0xffffffff >>
1655                                                                  (32 - width));
1656                                         width = 0;
1657                                 } else {
1658                                         mask[idx] = RTE_BE32(0xffffffff);
1659                                         width -= 32;
1660                                 }
1661                                 if (!width)
1662                                         break;
1663                                 ++idx;
1664                         }
1665                         if (data->offset < 64) {
1666                                 info[idx] = (struct field_modify_info){4,
1667                                                 4 * idx,
1668                                                 MLX5_MODI_OUT_DIPV6_63_32};
1669                                 if (width < 32) {
1670                                         mask[idx] =
1671                                                 rte_cpu_to_be_32(0xffffffff >>
1672                                                                  (32 - width));
1673                                         width = 0;
1674                                 } else {
1675                                         mask[idx] = RTE_BE32(0xffffffff);
1676                                         width -= 32;
1677                                 }
1678                                 if (!width)
1679                                         break;
1680                                 ++idx;
1681                         }
1682                         if (data->offset < 96) {
1683                                 info[idx] = (struct field_modify_info){4,
1684                                                 4 * idx,
1685                                                 MLX5_MODI_OUT_DIPV6_95_64};
1686                                 if (width < 32) {
1687                                         mask[idx] =
1688                                                 rte_cpu_to_be_32(0xffffffff >>
1689                                                                  (32 - width));
1690                                         width = 0;
1691                                 } else {
1692                                         mask[idx] = RTE_BE32(0xffffffff);
1693                                         width -= 32;
1694                                 }
1695                                 if (!width)
1696                                         break;
1697                                 ++idx;
1698                         }
1699                         info[idx] = (struct field_modify_info){4, 4 * idx,
1700                                                 MLX5_MODI_OUT_DIPV6_127_96};
1701                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1702                                                      (32 - width));
1703                 } else {
1704                         if (data->offset < 32)
1705                                 info[idx++] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_31_0};
1707                         if (data->offset < 64)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_63_32};
1710                         if (data->offset < 96)
1711                                 info[idx++] = (struct field_modify_info){4, 0,
1712                                                 MLX5_MODI_OUT_DIPV6_95_64};
1713                         if (data->offset < 128)
1714                                 info[idx++] = (struct field_modify_info){4, 0,
1715                                                 MLX5_MODI_OUT_DIPV6_127_96};
1716                 }
1717                 break;
1718         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1719                 info[idx] = (struct field_modify_info){2, 0,
1720                                         MLX5_MODI_OUT_TCP_SPORT};
1721                 if (mask)
1722                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1723                 break;
1724         case RTE_FLOW_FIELD_TCP_PORT_DST:
1725                 info[idx] = (struct field_modify_info){2, 0,
1726                                         MLX5_MODI_OUT_TCP_DPORT};
1727                 if (mask)
1728                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1729                 break;
1730         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1731                 info[idx] = (struct field_modify_info){4, 0,
1732                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1733                 if (mask)
1734                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1735                                                      (32 - width));
1736                 break;
1737         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1738                 info[idx] = (struct field_modify_info){4, 0,
1739                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1740                 if (mask)
1741                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1742                                                      (32 - width));
1743                 break;
1744         case RTE_FLOW_FIELD_TCP_FLAGS:
1745                 info[idx] = (struct field_modify_info){2, 0,
1746                                         MLX5_MODI_OUT_TCP_FLAGS};
1747                 if (mask)
1748                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1749                 break;
1750         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1751                 info[idx] = (struct field_modify_info){2, 0,
1752                                         MLX5_MODI_OUT_UDP_SPORT};
1753                 if (mask)
1754                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1755                 break;
1756         case RTE_FLOW_FIELD_UDP_PORT_DST:
1757                 info[idx] = (struct field_modify_info){2, 0,
1758                                         MLX5_MODI_OUT_UDP_DPORT};
1759                 if (mask)
1760                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1761                 break;
1762         case RTE_FLOW_FIELD_VXLAN_VNI:
1763                 /* not supported yet */
1764                 break;
1765         case RTE_FLOW_FIELD_GENEVE_VNI:
1766                 /* not supported yet */
1767                 break;
1768         case RTE_FLOW_FIELD_GTP_TEID:
1769                 info[idx] = (struct field_modify_info){4, 0,
1770                                         MLX5_MODI_GTP_TEID};
1771                 if (mask)
1772                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1773                                                      (32 - width));
1774                 break;
1775         case RTE_FLOW_FIELD_TAG:
1776                 {
1777                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1778                                                    data->level, error);
1779                         if (reg < 0)
1780                                 return;
1781                         MLX5_ASSERT(reg != REG_NON);
1782                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1783                         info[idx] = (struct field_modify_info){4, 0,
1784                                                 reg_to_field[reg]};
1785                         if (mask)
1786                                 mask[idx] =
1787                                         rte_cpu_to_be_32(0xffffffff >>
1788                                                          (32 - width));
1789                 }
1790                 break;
1791         case RTE_FLOW_FIELD_MARK:
1792                 {
1793                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1794                                                        0, error);
1795                         if (reg < 0)
1796                                 return;
1797                         MLX5_ASSERT(reg != REG_NON);
1798                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1799                         info[idx] = (struct field_modify_info){4, 0,
1800                                                 reg_to_field[reg]};
1801                         if (mask)
1802                                 mask[idx] =
1803                                         rte_cpu_to_be_32(0xffffffff >>
1804                                                          (32 - width));
1805                 }
1806                 break;
1807         case RTE_FLOW_FIELD_META:
1808                 {
1809                         unsigned int xmeta = config->dv_xmeta_en;
1810                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1811                         if (reg < 0)
1812                                 return;
1813                         MLX5_ASSERT(reg != REG_NON);
1814                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1815                         if (xmeta == MLX5_XMETA_MODE_META16) {
1816                                 info[idx] = (struct field_modify_info){2, 0,
1817                                                         reg_to_field[reg]};
1818                                 if (mask)
1819                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1820                                                                 (16 - width));
1821                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1822                                 info[idx] = (struct field_modify_info){4, 0,
1823                                                         reg_to_field[reg]};
1824                                 if (mask)
1825                                         mask[idx] =
1826                                                 rte_cpu_to_be_32(0xffffffff >>
1827                                                                 (32 - width));
1828                         } else {
1829                                 MLX5_ASSERT(false);
1830                         }
1831                 }
1832                 break;
1833         case RTE_FLOW_FIELD_POINTER:
1834         case RTE_FLOW_FIELD_VALUE:
1835                 if (data->field == RTE_FLOW_FIELD_POINTER)
1836                         memcpy(&val, (void *)(uintptr_t)data->value,
1837                                sizeof(uint64_t));
1838                 else
1839                         val = data->value;
1840                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1841                         if (mask[idx]) {
1842                                 if (dst_width == 48) {
1843                                         /* Special case for MAC addresses. */
1844                                         value[idx] = rte_cpu_to_be_16(val);
1845                                         val >>= 16;
1846                                         dst_width -= 16;
1847                                 } else if (dst_width > 16) {
1848                                         value[idx] = rte_cpu_to_be_32(val);
1849                                         val >>= 32;
1850                                 } else if (dst_width > 8) {
1851                                         value[idx] = rte_cpu_to_be_16(val);
1852                                         val >>= 16;
1853                                 } else {
1854                                         value[idx] = (uint8_t)val;
1855                                         val >>= 8;
1856                                 }
1857                                 if (!val)
1858                                         break;
1859                         }
1860                 }
1861                 break;
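                     /*
                      * Worked example for the immediate-value case above
                      * (illustrative): writing 0x112233445566 to a MAC
                      * address (dst_width == 48) packs
                      *   value[0] = rte_cpu_to_be_16(0x5566) and
                      *   value[1] = rte_cpu_to_be_32(0x11223344),
                      * mirroring the 16/32-bit MAC field split handled
                      * earlier in this function.
                      */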
1862         default:
1863                 MLX5_ASSERT(false);
1864                 break;
1865         }
1866 }
1867
1868 /**
1869  * Convert modify_field action to DV specification.
1870  *
1871  * @param[in] dev
1872  *   Pointer to the rte_eth_dev structure.
1873  * @param[in,out] resource
1874  *   Pointer to the modify-header resource.
1875  * @param[in] action
1876  *   Pointer to action specification.
1877  * @param[in] attr
1878  *   Attributes of flow that includes this item.
1879  * @param[out] error
1880  *   Pointer to the error structure.
1881  *
1882  * @return
1883  *   0 on success, a negative errno value otherwise and rte_errno is set.
1884  */
1885 static int
1886 flow_dv_convert_action_modify_field
1887                         (struct rte_eth_dev *dev,
1888                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1889                          const struct rte_flow_action *action,
1890                          const struct rte_flow_attr *attr,
1891                          struct rte_flow_error *error)
1892 {
1893         struct mlx5_priv *priv = dev->data->dev_private;
1894         struct mlx5_dev_config *config = &priv->config;
1895         const struct rte_flow_action_modify_field *conf =
1896                 (const struct rte_flow_action_modify_field *)(action->conf);
1897         struct rte_flow_item item;
1898         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1899                                                                 {0, 0, 0} };
1900         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1901                                                                 {0, 0, 0} };
1902         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1903         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1904         uint32_t type;
1905         uint32_t dst_width = mlx5_flow_item_field_width(config,
1906                                                         conf->dst.field);
1907
1908         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1909             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1910                 type = MLX5_MODIFICATION_TYPE_SET;
1911                 /* For SET, fill the destination field (field) first. */
1912                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1913                         value, conf->width, dst_width, dev, attr, error);
1914                 /* Then copy the immediate value from the source as per the mask. */
1915                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1916                         value, conf->width, dst_width, dev, attr, error);
1917                 item.spec = &value;
1918         } else {
1919                 type = MLX5_MODIFICATION_TYPE_COPY;
1920                 /* For COPY, fill the destination field (dcopy) without a mask. */
1921                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1922                         value, conf->width, dst_width, dev, attr, error);
1923                 /* Then construct the source field (field) with the mask. */
1924                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1925                         value, conf->width, dst_width, dev, attr, error);
1926         }
1927         item.mask = &mask;
1928         return flow_dv_convert_modify_action(&item,
1929                         field, dcopy, resource, type, error);
1930 }
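     /*
      * Illustrative usage (action contents assumed): a MODIFY_FIELD action
      * whose src.field is RTE_FLOW_FIELD_VALUE takes the
      * MLX5_MODIFICATION_TYPE_SET branch above, while one copying, say,
      * RTE_FLOW_FIELD_TCP_PORT_SRC into RTE_FLOW_FIELD_TAG takes the
      * MLX5_MODIFICATION_TYPE_COPY branch, filling both field and dcopy.
      */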
1931
1932 /**
1933  * Validate MARK item.
1934  *
1935  * @param[in] dev
1936  *   Pointer to the rte_eth_dev structure.
1937  * @param[in] item
1938  *   Item specification.
1939  * @param[in] attr
1940  *   Attributes of flow that includes this item.
1941  * @param[out] error
1942  *   Pointer to error structure.
1943  *
1944  * @return
1945  *   0 on success, a negative errno value otherwise and rte_errno is set.
1946  */
1947 static int
1948 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1949                            const struct rte_flow_item *item,
1950                            const struct rte_flow_attr *attr __rte_unused,
1951                            struct rte_flow_error *error)
1952 {
1953         struct mlx5_priv *priv = dev->data->dev_private;
1954         struct mlx5_dev_config *config = &priv->config;
1955         const struct rte_flow_item_mark *spec = item->spec;
1956         const struct rte_flow_item_mark *mask = item->mask;
1957         const struct rte_flow_item_mark nic_mask = {
1958                 .id = priv->sh->dv_mark_mask,
1959         };
1960         int ret;
1961
1962         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1963                 return rte_flow_error_set(error, ENOTSUP,
1964                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1965                                           "extended metadata feature"
1966                                           " isn't enabled");
1967         if (!mlx5_flow_ext_mreg_supported(dev))
1968                 return rte_flow_error_set(error, ENOTSUP,
1969                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1970                                           "extended metadata register"
1971                                           " isn't supported");
1972         if (!nic_mask.id)
1973                 return rte_flow_error_set(error, ENOTSUP,
1974                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1975                                           "extended metadata register"
1976                                           " isn't available");
1977         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1978         if (ret < 0)
1979                 return ret;
1980         if (!spec)
1981                 return rte_flow_error_set(error, EINVAL,
1982                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1983                                           item->spec,
1984                                           "data cannot be empty");
1985         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1986                 return rte_flow_error_set(error, EINVAL,
1987                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1988                                           &spec->id,
1989                                           "mark id exceeds the limit");
1990         if (!mask)
1991                 mask = &nic_mask;
1992         if (!mask->id)
1993                 return rte_flow_error_set(error, EINVAL,
1994                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1995                                         "mask cannot be zero");
1996
1997         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1998                                         (const uint8_t *)&nic_mask,
1999                                         sizeof(struct rte_flow_item_mark),
2000                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2001         if (ret < 0)
2002                 return ret;
2003         return 0;
2004 }
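     /*
      * Illustrative example (assumed values): with dv_mark_mask covering
      * 24 bits, a MARK item spec whose id exceeds the supported range fails
      * the "mark id exceeds the limit" check above, while a small id such
      * as 0xabcd is accepted, provided extended metadata is enabled.
      */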
2005
2006 /**
2007  * Validate META item.
2008  *
2009  * @param[in] dev
2010  *   Pointer to the rte_eth_dev structure.
2011  * @param[in] item
2012  *   Item specification.
2013  * @param[in] attr
2014  *   Attributes of flow that includes this item.
2015  * @param[out] error
2016  *   Pointer to error structure.
2017  *
2018  * @return
2019  *   0 on success, a negative errno value otherwise and rte_errno is set.
2020  */
2021 static int
2022 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
2023                            const struct rte_flow_item *item,
2024                            const struct rte_flow_attr *attr,
2025                            struct rte_flow_error *error)
2026 {
2027         struct mlx5_priv *priv = dev->data->dev_private;
2028         struct mlx5_dev_config *config = &priv->config;
2029         const struct rte_flow_item_meta *spec = item->spec;
2030         const struct rte_flow_item_meta *mask = item->mask;
2031         struct rte_flow_item_meta nic_mask = {
2032                 .data = UINT32_MAX
2033         };
2034         int reg;
2035         int ret;
2036
2037         if (!spec)
2038                 return rte_flow_error_set(error, EINVAL,
2039                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2040                                           item->spec,
2041                                           "data cannot be empty");
2042         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2043                 if (!mlx5_flow_ext_mreg_supported(dev))
2044                         return rte_flow_error_set(error, ENOTSUP,
2045                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2046                                           "extended metadata register"
2047                                           " isn't supported");
2048                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2049                 if (reg < 0)
2050                         return reg;
2051                 if (reg == REG_NON)
2052                         return rte_flow_error_set(error, ENOTSUP,
2053                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2054                                         "unavailable extended metadata register");
2055                 if (reg == REG_B)
2056                         return rte_flow_error_set(error, ENOTSUP,
2057                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2058                                           "match on reg_b "
2059                                           "isn't supported");
2060                 if (reg != REG_A)
2061                         nic_mask.data = priv->sh->dv_meta_mask;
2062         } else {
2063                 if (attr->transfer)
2064                         return rte_flow_error_set(error, ENOTSUP,
2065                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2066                                         "extended metadata feature "
2067                                         "must be enabled when the "
2068                                         "meta item is requested "
2069                                         "in E-Switch mode");
2070                 if (attr->ingress)
2071                         return rte_flow_error_set(error, ENOTSUP,
2072                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2073                                         "match on metadata for ingress "
2074                                         "is not supported in legacy "
2075                                         "metadata mode");
2076         }
2077         if (!mask)
2078                 mask = &rte_flow_item_meta_mask;
2079         if (!mask->data)
2080                 return rte_flow_error_set(error, EINVAL,
2081                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2082                                         "mask cannot be zero");
2083
2084         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2085                                         (const uint8_t *)&nic_mask,
2086                                         sizeof(struct rte_flow_item_meta),
2087                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2088         return ret;
2089 }
2090
2091 /**
2092  * Validate TAG item.
2093  *
2094  * @param[in] dev
2095  *   Pointer to the rte_eth_dev structure.
2096  * @param[in] item
2097  *   Item specification.
2098  * @param[in] attr
2099  *   Attributes of flow that includes this item.
2100  * @param[out] error
2101  *   Pointer to error structure.
2102  *
2103  * @return
2104  *   0 on success, a negative errno value otherwise and rte_errno is set.
2105  */
2106 static int
2107 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2108                           const struct rte_flow_item *item,
2109                           const struct rte_flow_attr *attr __rte_unused,
2110                           struct rte_flow_error *error)
2111 {
2112         const struct rte_flow_item_tag *spec = item->spec;
2113         const struct rte_flow_item_tag *mask = item->mask;
2114         const struct rte_flow_item_tag nic_mask = {
2115                 .data = RTE_BE32(UINT32_MAX),
2116                 .index = 0xff,
2117         };
2118         int ret;
2119
2120         if (!mlx5_flow_ext_mreg_supported(dev))
2121                 return rte_flow_error_set(error, ENOTSUP,
2122                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2123                                           "extensive metadata register"
2124                                           " isn't supported");
2125         if (!spec)
2126                 return rte_flow_error_set(error, EINVAL,
2127                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2128                                           item->spec,
2129                                           "data cannot be empty");
2130         if (!mask)
2131                 mask = &rte_flow_item_tag_mask;
2132         if (!mask->data)
2133                 return rte_flow_error_set(error, EINVAL,
2134                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2135                                         "mask cannot be zero");
2136
2137         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2138                                         (const uint8_t *)&nic_mask,
2139                                         sizeof(struct rte_flow_item_tag),
2140                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2141         if (ret < 0)
2142                 return ret;
2143         if (mask->index != 0xff)
2144                 return rte_flow_error_set(error, EINVAL,
2145                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2146                                           "partial mask for tag index"
2147                                           " is not supported");
2148         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2149         if (ret < 0)
2150                 return ret;
2151         MLX5_ASSERT(ret != REG_NON);
2152         return 0;
2153 }
2154
2155 /**
2156  * Validate port_id item.
2157  *
2158  * @param[in] dev
2159  *   Pointer to the rte_eth_dev structure.
2160  * @param[in] item
2161  *   Item specification.
2162  * @param[in] attr
2163  *   Attributes of flow that includes this item.
2164  * @param[in] item_flags
2165  *   Bit-fields that holds the items detected until now.
2166  * @param[out] error
2167  *   Pointer to error structure.
2168  *
2169  * @return
2170  *   0 on success, a negative errno value otherwise and rte_errno is set.
2171  */
2172 static int
2173 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2174                               const struct rte_flow_item *item,
2175                               const struct rte_flow_attr *attr,
2176                               uint64_t item_flags,
2177                               struct rte_flow_error *error)
2178 {
2179         const struct rte_flow_item_port_id *spec = item->spec;
2180         const struct rte_flow_item_port_id *mask = item->mask;
2181         const struct rte_flow_item_port_id switch_mask = {
2182                         .id = 0xffffffff,
2183         };
2184         struct mlx5_priv *esw_priv;
2185         struct mlx5_priv *dev_priv;
2186         int ret;
2187
2188         if (!attr->transfer)
2189                 return rte_flow_error_set(error, EINVAL,
2190                                           RTE_FLOW_ERROR_TYPE_ITEM,
2191                                           NULL,
2192                                           "match on port id is valid only"
2193                                           " when transfer flag is enabled");
2194         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2195                 return rte_flow_error_set(error, ENOTSUP,
2196                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2197                                           "multiple source ports are not"
2198                                           " supported");
2199         if (!mask)
2200                 mask = &switch_mask;
2201         if (mask->id != 0xffffffff)
2202                 return rte_flow_error_set(error, ENOTSUP,
2203                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2204                                            mask,
2205                                            "no support for partial mask on"
2206                                            " \"id\" field");
2207         ret = mlx5_flow_item_acceptable
2208                                 (item, (const uint8_t *)mask,
2209                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2210                                  sizeof(struct rte_flow_item_port_id),
2211                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2212         if (ret)
2213                 return ret;
2214         if (!spec)
2215                 return 0;
2216         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2217         if (!esw_priv)
2218                 return rte_flow_error_set(error, rte_errno,
2219                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2220                                           "failed to obtain E-Switch info for"
2221                                           " port");
2222         dev_priv = mlx5_dev_to_eswitch_info(dev);
2223         if (!dev_priv)
2224                 return rte_flow_error_set(error, rte_errno,
2225                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2226                                           NULL,
2227                                           "failed to obtain E-Switch info");
2228         if (esw_priv->domain_id != dev_priv->domain_id)
2229                 return rte_flow_error_set(error, EINVAL,
2230                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2231                                           "cannot match on a port from a"
2232                                           " different E-Switch");
2233         return 0;
2234 }
2235
2236 /**
2237  * Validate VLAN item.
2238  *
2239  * @param[in] item
2240  *   Item specification.
2241  * @param[in] item_flags
2242  *   Bit-fields that holds the items detected until now.
2243  * @param[in] dev
2244  *   Ethernet device flow is being created on.
2245  * @param[out] error
2246  *   Pointer to error structure.
2247  *
2248  * @return
2249  *   0 on success, a negative errno value otherwise and rte_errno is set.
2250  */
2251 static int
2252 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2253                            uint64_t item_flags,
2254                            struct rte_eth_dev *dev,
2255                            struct rte_flow_error *error)
2256 {
2257         const struct rte_flow_item_vlan *mask = item->mask;
2258         const struct rte_flow_item_vlan nic_mask = {
2259                 .tci = RTE_BE16(UINT16_MAX),
2260                 .inner_type = RTE_BE16(UINT16_MAX),
2261                 .has_more_vlan = 1,
2262         };
2263         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2264         int ret;
2265         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2266                                         MLX5_FLOW_LAYER_INNER_L4) :
2267                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2268                                         MLX5_FLOW_LAYER_OUTER_L4);
2269         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2270                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2271
2272         if (item_flags & vlanm)
2273                 return rte_flow_error_set(error, EINVAL,
2274                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2275                                           "multiple VLAN layers not supported");
2276         else if ((item_flags & l34m) != 0)
2277                 return rte_flow_error_set(error, EINVAL,
2278                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2279                                           "VLAN cannot follow L3/L4 layer");
2280         if (!mask)
2281                 mask = &rte_flow_item_vlan_mask;
2282         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2283                                         (const uint8_t *)&nic_mask,
2284                                         sizeof(struct rte_flow_item_vlan),
2285                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2286         if (ret)
2287                 return ret;
2288         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2289                 struct mlx5_priv *priv = dev->data->dev_private;
2290
2291                 if (priv->vmwa_context) {
2292                         /*
2293                          * A non-NULL context means we have a virtual machine
2294                          * with SR-IOV enabled; we have to create a VLAN
2295                          * interface to make the hypervisor set up the E-Switch
2296                          * vport context correctly. We avoid creating multiple
2297                          * VLAN interfaces, so we cannot support a VLAN tag mask.
2298                          */
2299                         return rte_flow_error_set(error, EINVAL,
2300                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2301                                                   item,
2302                                                   "VLAN tag mask is not"
2303                                                   " supported in virtual"
2304                                                   " environment");
2305                 }
2306         }
2307         return 0;
2308 }
2309
2310 /*
2311  * GTP flags are contained in 1 byte of the format:
2312  * -------------------------------------------
2313  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2314  * |-----------------------------------------|
2315  * | value | Version | PT | Res | E | S | PN |
2316  * -------------------------------------------
2317  *
2318  * Matching is supported only for GTP flags E, S, PN.
2319  */
2320 #define MLX5_GTP_FLAGS_MASK     0x07
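     /*
      * Illustrative decoding (assumed value): a v_pt_rsv_flags byte of 0x32
      * is Version 1, PT 1, E 0, S 1, PN 0; MLX5_GTP_FLAGS_MASK (0x07)
      * retains only the E, S and PN bits that can be matched.
      */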
2321
2322 /**
2323  * Validate GTP item.
2324  *
2325  * @param[in] dev
2326  *   Pointer to the rte_eth_dev structure.
2327  * @param[in] item
2328  *   Item specification.
2329  * @param[in] item_flags
2330  *   Bit-fields that holds the items detected until now.
2331  * @param[out] error
2332  *   Pointer to error structure.
2333  *
2334  * @return
2335  *   0 on success, a negative errno value otherwise and rte_errno is set.
2336  */
2337 static int
2338 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2339                           const struct rte_flow_item *item,
2340                           uint64_t item_flags,
2341                           struct rte_flow_error *error)
2342 {
2343         struct mlx5_priv *priv = dev->data->dev_private;
2344         const struct rte_flow_item_gtp *spec = item->spec;
2345         const struct rte_flow_item_gtp *mask = item->mask;
2346         const struct rte_flow_item_gtp nic_mask = {
2347                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2348                 .msg_type = 0xff,
2349                 .teid = RTE_BE32(0xffffffff),
2350         };
2351
2352         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2353                 return rte_flow_error_set(error, ENOTSUP,
2354                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2355                                           "GTP support is not enabled");
2356         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2357                 return rte_flow_error_set(error, ENOTSUP,
2358                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2359                                           "multiple tunnel layers not"
2360                                           " supported");
2361         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2362                 return rte_flow_error_set(error, EINVAL,
2363                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                                           "no outer UDP layer found");
2365         if (!mask)
2366                 mask = &rte_flow_item_gtp_mask;
2367         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2368                 return rte_flow_error_set(error, ENOTSUP,
2369                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2370                                           "matching is supported only on"
2371                                           " the GTP E, S and PN flags");
2372         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2373                                          (const uint8_t *)&nic_mask,
2374                                          sizeof(struct rte_flow_item_gtp),
2375                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2376 }
2377
2378 /**
2379  * Validate GTP PSC item.
2380  *
2381  * @param[in] item
2382  *   Item specification.
2383  * @param[in] last_item
2384  *   Previous validated item in the pattern items.
2385  * @param[in] gtp_item
2386  *   Previous GTP item specification.
2387  * @param[in] attr
2388  *   Pointer to flow attributes.
2389  * @param[out] error
2390  *   Pointer to error structure.
2391  *
2392  * @return
2393  *   0 on success, a negative errno value otherwise and rte_errno is set.
2394  */
2395 static int
2396 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2397                               uint64_t last_item,
2398                               const struct rte_flow_item *gtp_item,
2399                               const struct rte_flow_attr *attr,
2400                               struct rte_flow_error *error)
2401 {
2402         const struct rte_flow_item_gtp *gtp_spec;
2403         const struct rte_flow_item_gtp *gtp_mask;
2404         const struct rte_flow_item_gtp_psc *spec;
2405         const struct rte_flow_item_gtp_psc *mask;
2406         const struct rte_flow_item_gtp_psc nic_mask = {
2407                 .pdu_type = 0xFF,
2408                 .qfi = 0xFF,
2409         };
2410
2411         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2412                 return rte_flow_error_set
2413                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2414                          "GTP PSC item must be preceded with GTP item");
2415         gtp_spec = gtp_item->spec;
2416         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2417         /* Reject a GTP spec that matches the E flag as zero. */
2418         if (gtp_spec &&
2419                 (gtp_mask->v_pt_rsv_flags &
2420                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2421                 return rte_flow_error_set
2422                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2423                          "GTP E flag must be 1 to match GTP PSC");
2424         /* Check the flow is not created in group zero. */
2425         if (!attr->transfer && !attr->group)
2426                 return rte_flow_error_set
2427                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2428                          "GTP PSC is not supported for group 0");
2429         /* The GTP PSC spec is optional; nothing more to check without it. */
2430         if (!item->spec)
2431                 return 0;
2432         spec = item->spec;
2433         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2434         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2435                 return rte_flow_error_set
2436                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2437                          "PDU type should be smaller than 16");
2438         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2439                                          (const uint8_t *)&nic_mask,
2440                                          sizeof(struct rte_flow_item_gtp_psc),
2441                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2442 }
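     /*
      * Illustrative pattern (assumed, testpmd-like syntax): matching
      *   ... / gtp v_pt_rsv_flags is 0x04 / gtp_psc qfi is 9 / ...
      * satisfies the checks above, since the GTP E flag (0x04) is matched
      * as 1; without it the "GTP E flag must be 1 to match GTP PSC" error
      * is returned.
      */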
2443
2444 /**
2445  * Validate IPv4 item.
2446  * Uses the existing validation function mlx5_flow_validate_item_ipv4() and
2447  * adds specific validation of the fragment_offset field.
2448  *
2449  * @param[in] item
2450  *   Item specification.
2451  * @param[in] item_flags
2452  *   Bit-fields that holds the items detected until now.
2453  * @param[out] error
2454  *   Pointer to error structure.
2455  *
2456  * @return
2457  *   0 on success, a negative errno value otherwise and rte_errno is set.
2458  */
2459 static int
2460 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2461                            uint64_t item_flags,
2462                            uint64_t last_item,
2463                            uint16_t ether_type,
2464                            struct rte_flow_error *error)
2465 {
2466         int ret;
2467         const struct rte_flow_item_ipv4 *spec = item->spec;
2468         const struct rte_flow_item_ipv4 *last = item->last;
2469         const struct rte_flow_item_ipv4 *mask = item->mask;
2470         rte_be16_t fragment_offset_spec = 0;
2471         rte_be16_t fragment_offset_last = 0;
2472         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2473                 .hdr = {
2474                         .src_addr = RTE_BE32(0xffffffff),
2475                         .dst_addr = RTE_BE32(0xffffffff),
2476                         .type_of_service = 0xff,
2477                         .fragment_offset = RTE_BE16(0xffff),
2478                         .next_proto_id = 0xff,
2479                         .time_to_live = 0xff,
2480                 },
2481         };
2482
2483         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2484                                            ether_type, &nic_ipv4_mask,
2485                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2486         if (ret < 0)
2487                 return ret;
2488         if (spec && mask)
2489                 fragment_offset_spec = spec->hdr.fragment_offset &
2490                                        mask->hdr.fragment_offset;
2491         if (!fragment_offset_spec)
2492                 return 0;
2493         /*
2494          * spec and mask are valid, enforce using full mask to make sure the
2495          * complete value is used correctly.
2496          */
2497         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2498                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2499                 return rte_flow_error_set(error, EINVAL,
2500                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2501                                           item, "must use full mask for"
2502                                           " fragment_offset");
2503         /*
2504          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2505          * indicating this is the first fragment of a fragmented packet.
2506          * This is not yet supported in MLX5, return appropriate error message.
2507          */
2508         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2509                 return rte_flow_error_set(error, ENOTSUP,
2510                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2511                                           "match on first fragment not "
2512                                           "supported");
2513         if (fragment_offset_spec && !last)
2514                 return rte_flow_error_set(error, ENOTSUP,
2515                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2516                                           "specified value not supported");
2517         /* spec and last are valid, validate the specified range. */
2518         fragment_offset_last = last->hdr.fragment_offset &
2519                                mask->hdr.fragment_offset;
2520         /*
2521          * Match on fragment_offset spec 0x2001 and last 0x3fff
2522          * means MF is 1 and frag-offset is > 0.
2523          * This packet is the second fragment or a later one, excluding the last.
2524          * This is not yet supported in MLX5, return appropriate
2525          * error message.
2526          */
2527         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2528             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2529                 return rte_flow_error_set(error, ENOTSUP,
2530                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2531                                           last, "match on following "
2532                                           "fragments not supported");
2533         /*
2534          * Match on fragment_offset spec 0x0001 and last 0x1fff
2535          * means MF is 0 and frag-offset is > 0.
2536          * This packet is the last fragment of a fragmented packet.
2537          * This is not yet supported in MLX5, return appropriate
2538          * error message.
2539          */
2540         if (fragment_offset_spec == RTE_BE16(1) &&
2541             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2542                 return rte_flow_error_set(error, ENOTSUP,
2543                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2544                                           last, "match on last "
2545                                           "fragment not supported");
2546         /*
2547          * Match on fragment_offset spec 0x0001 and last 0x3fff
2548          * means MF and/or frag-offset is not 0.
2549          * This is a fragmented packet.
2550          * Other range values are invalid and rejected.
2551          */
2552         if (!(fragment_offset_spec == RTE_BE16(1) &&
2553               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2554                 return rte_flow_error_set(error, ENOTSUP,
2555                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2556                                           "specified range not supported");
2557         return 0;
2558 }
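
/*
 * Editorial sketch (not compiled into the driver): per the checks above,
 * the only accepted fragment_offset range is spec 0x0001 with last
 * 0x3fff under a full 0x3fff mask, i.e. "any fragment of a fragmented
 * packet". A minimal item built on that assumption:
 *
 *	const struct rte_flow_item_ipv4 frag_spec = {
 *		.hdr.fragment_offset = RTE_BE16(1),
 *	};
 *	const struct rte_flow_item_ipv4 frag_last = {
 *		.hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK),
 *	};
 *	const struct rte_flow_item_ipv4 frag_mask = {
 *		.hdr.fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK),
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		.spec = &frag_spec,
 *		.last = &frag_last,
 *		.mask = &frag_mask,
 *	};
 */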
2559
2560 /**
2561  * Validate IPV6 fragment extension item.
2562  *
2563  * @param[in] item
2564  *   Item specification.
2565  * @param[in] item_flags
2566  *   Bit-fields that hold the items detected until now.
2567  * @param[out] error
2568  *   Pointer to error structure.
2569  *
2570  * @return
2571  *   0 on success, a negative errno value otherwise and rte_errno is set.
2572  */
2573 static int
2574 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2575                                     uint64_t item_flags,
2576                                     struct rte_flow_error *error)
2577 {
2578         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2579         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2580         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2581         rte_be16_t frag_data_spec = 0;
2582         rte_be16_t frag_data_last = 0;
2583         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2584         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2585                                       MLX5_FLOW_LAYER_OUTER_L4;
2586         int ret = 0;
2587         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2588                 .hdr = {
2589                         .next_header = 0xff,
2590                         .frag_data = RTE_BE16(0xffff),
2591                 },
2592         };
2593
2594         if (item_flags & l4m)
2595                 return rte_flow_error_set(error, EINVAL,
2596                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2597                                           "ipv6 fragment extension item cannot "
2598                                           "follow L4 item.");
2599         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2600             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2601                 return rte_flow_error_set(error, EINVAL,
2602                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2603                                           "ipv6 fragment extension item must "
2604                                           "follow ipv6 item");
2605         if (spec && mask)
2606                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2607         if (!frag_data_spec)
2608                 return 0;
2609         /*
2610          * spec and mask are valid, enforce using full mask to make sure the
2611          * complete value is used correctly.
2612          */
2613         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2614                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2615                 return rte_flow_error_set(error, EINVAL,
2616                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2617                                           item, "must use full mask for"
2618                                           " frag_data");
2619         /*
2620          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2621          * This is the first fragment of a fragmented packet.
2622          */
2623         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2624                 return rte_flow_error_set(error, ENOTSUP,
2625                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2626                                           "match on first fragment not "
2627                                           "supported");
2628         if (frag_data_spec && !last)
2629                 return rte_flow_error_set(error, EINVAL,
2630                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2631                                           "specified value not supported");
2632         ret = mlx5_flow_item_acceptable
2633                                 (item, (const uint8_t *)mask,
2634                                  (const uint8_t *)&nic_mask,
2635                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2636                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2637         if (ret)
2638                 return ret;
2639         /* spec and last are valid, validate the specified range. */
2640         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2641         /*
2642          * Match on frag_data spec 0x0009 and last 0xfff9
2643          * means M is 1 and frag-offset is > 0.
2644          * This packet is the second fragment or a later one, excluding the last.
2645          * This is not yet supported in MLX5, return appropriate
2646          * error message.
2647          */
2648         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2649                                        RTE_IPV6_EHDR_MF_MASK) &&
2650             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2651                 return rte_flow_error_set(error, ENOTSUP,
2652                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2653                                           last, "match on following "
2654                                           "fragments not supported");
2655         /*
2656          * Match on frag_data spec 0x0008 and last 0xfff8
2657          * means M is 0 and frag-offset is > 0.
2658          * This packet is the last fragment of a fragmented packet.
2659          * This is not yet supported in MLX5, return appropriate
2660          * error message.
2661          */
2662         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2663             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2664                 return rte_flow_error_set(error, ENOTSUP,
2665                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2666                                           last, "match on last "
2667                                           "fragment not supported");
2668         /*
2669          * Match on frag_data spec 0x0001 and last 0xfff9
2670          * means M and/or frag-offset is not 0.
2671          * This is a fragmented packet.
              * Other range values are invalid and rejected.
              */
             if (!(frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK) &&
                   frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK)))
                     return rte_flow_error_set(error, EINVAL,
                                               RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
                                               "specified range not supported");
             return 0;
2672 }
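
/*
 * Editorial sketch (not compiled into the driver): the final range check
 * above only lets through spec 0x0001 (the M bit) with last 0xfff9 under
 * a full frag_data mask, i.e. "any fragment". The corresponding item
 * fields, for illustration only:
 *
 *	const struct rte_flow_item_ipv6_frag_ext ext_spec = {
 *		.hdr.frag_data = RTE_BE16(RTE_IPV6_EHDR_MF_MASK),
 *	};
 *	const struct rte_flow_item_ipv6_frag_ext ext_last = {
 *		.hdr.frag_data = RTE_BE16(RTE_IPV6_FRAG_USED_MASK),
 *	};
 *	const struct rte_flow_item_ipv6_frag_ext ext_mask = {
 *		.hdr.frag_data = RTE_BE16(RTE_IPV6_FRAG_USED_MASK),
 *	};
 */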
2673
2674 /**
2675  * Validate ASO CT item.
2676  *
2677  * @param[in] dev
2678  *   Pointer to the rte_eth_dev structure.
2679  * @param[in] item
2680  *   Item specification.
2681  * @param[in] item_flags
2682  *   Pointer to bit-fields that hold the items detected until now.
2683  * @param[out] error
2684  *   Pointer to error structure.
2685  *
2686  * @return
2687  *   0 on success, a negative errno value otherwise and rte_errno is set.
2688  */
2689 static int
2690 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2691                              const struct rte_flow_item *item,
2692                              uint64_t *item_flags,
2693                              struct rte_flow_error *error)
2694 {
2695         const struct rte_flow_item_conntrack *spec = item->spec;
2696         const struct rte_flow_item_conntrack *mask = item->mask;
2697         uint32_t flags;
2698
2699         RTE_SET_USED(dev);
2700         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2701                 return rte_flow_error_set(error, EINVAL,
2702                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2703                                           "Only one CT is supported");
2704         if (!mask)
2705                 mask = &rte_flow_item_conntrack_mask;
             /* The spec is optional; without it there is no state to check. */
2706         flags = spec ? (spec->flags & mask->flags) : 0;
2707         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2708             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2709              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2710              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2711                 return rte_flow_error_set(error, EINVAL,
2712                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2713                                           "Conflict status bits");
2714         /* State change also needs to be considered. */
2715         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2716         return 0;
2717 }
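
/*
 * Editorial sketch (not compiled into the driver): a conntrack item the
 * validation above accepts. The VALID bit is requested alone, so it
 * cannot conflict with the INVALID, BAD or DISABLED bits; the flag
 * choice is an assumption for illustration:
 *
 *	const struct rte_flow_item_conntrack ct_spec = {
 *		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
 *	};
 *	const struct rte_flow_item ct_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
 *		.spec = &ct_spec,
 *	};
 */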
2718
2719 /**
2720  * Validate the pop VLAN action.
2721  *
2722  * @param[in] dev
2723  *   Pointer to the rte_eth_dev structure.
2724  * @param[in] action_flags
2725  *   Holds the actions detected until now.
2726  * @param[in] action
2727  *   Pointer to the pop vlan action.
2728  * @param[in] item_flags
2729  *   The items found in this flow rule.
2730  * @param[in] attr
2731  *   Pointer to flow attributes.
2732  * @param[out] error
2733  *   Pointer to error structure.
2734  *
2735  * @return
2736  *   0 on success, a negative errno value otherwise and rte_errno is set.
2737  */
2738 static int
2739 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2740                                  uint64_t action_flags,
2741                                  const struct rte_flow_action *action,
2742                                  uint64_t item_flags,
2743                                  const struct rte_flow_attr *attr,
2744                                  struct rte_flow_error *error)
2745 {
2746         const struct mlx5_priv *priv = dev->data->dev_private;
2747
2750         if (!priv->sh->pop_vlan_action)
2751                 return rte_flow_error_set(error, ENOTSUP,
2752                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2753                                           NULL,
2754                                           "pop vlan action is not supported");
2755         if (attr->egress)
2756                 return rte_flow_error_set(error, ENOTSUP,
2757                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2758                                           NULL,
2759                                           "pop vlan action not supported for "
2760                                           "egress");
2761         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2762                 return rte_flow_error_set(error, ENOTSUP,
2763                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2764                                           "no support for multiple VLAN "
2765                                           "actions");
2766         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2767         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2768             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2769                 return rte_flow_error_set(error, ENOTSUP,
2770                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2771                                           NULL,
2772                                           "cannot pop vlan after decap without "
2773                                           "match on inner vlan in the flow");
2774         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2775         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2776             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2777                 return rte_flow_error_set(error, ENOTSUP,
2778                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2779                                           NULL,
2780                                           "cannot pop vlan without a "
2781                                           "match on (outer) vlan in the flow");
2782         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2783                 return rte_flow_error_set(error, EINVAL,
2784                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2785                                           "wrong action order, port_id should "
2786                                           "be after pop VLAN action");
2787         if (!attr->transfer && priv->representor)
2788                 return rte_flow_error_set(error, ENOTSUP,
2789                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2790                                           "pop vlan action for VF representor "
2791                                           "not supported on NIC table");
2792         return 0;
2793 }
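
/*
 * Editorial sketch (not compiled into the driver): without a preceding
 * decap, pop VLAN requires an outer VLAN match in the same rule, and a
 * port_id action may only come after it. A minimal conforming rule,
 * with the queue index an illustrative assumption:
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */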
2794
2795 /**
2796  * Get VLAN default info from vlan match info.
2797  *
2798  * @param[in] items
2799  *   the list of item specifications.
2800  * @param[out] vlan
2801  *   Pointer to the VLAN info to fill in.
2802  */
2806 static void
2807 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2808                                   struct rte_vlan_hdr *vlan)
2809 {
2810         const struct rte_flow_item_vlan nic_mask = {
2811                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2812                                 MLX5DV_FLOW_VLAN_VID_MASK),
2813                 .inner_type = RTE_BE16(0xffff),
2814         };
2815
2816         if (items == NULL)
2817                 return;
2818         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2819                 int type = items->type;
2820
2821                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2822                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2823                         break;
2824         }
2825         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2826                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2827                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2828
2829                 /* If VLAN item in pattern doesn't contain data, return here. */
2830                 if (!vlan_v)
2831                         return;
2832                 if (!vlan_m)
2833                         vlan_m = &nic_mask;
2834                 /* Only full match values are accepted */
2835                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2836                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2837                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2838                         vlan->vlan_tci |=
2839                                 rte_be_to_cpu_16(vlan_v->tci &
2840                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2841                 }
2842                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2843                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2844                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2845                         vlan->vlan_tci |=
2846                                 rte_be_to_cpu_16(vlan_v->tci &
2847                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2848                 }
2849                 if (vlan_m->inner_type == nic_mask.inner_type)
2850                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2851                                                            vlan_m->inner_type);
2852         }
2853 }
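
/*
 * Editorial sketch of the TCI decomposition above: with a fully masked
 * TCI of 0xa00a, the PCP bits (15:13) yield 5 and the VID bits (11:0)
 * yield 0x00a. In plain host-order arithmetic:
 *
 *	uint16_t tci = 0xa00a;
 *	uint16_t pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >>
 *		       MLX5DV_FLOW_VLAN_PCP_SHIFT;
 *	uint16_t vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;
 */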
2854
2855 /**
2856  * Validate the push VLAN action.
2857  *
2858  * @param[in] dev
2859  *   Pointer to the rte_eth_dev structure.
2860  * @param[in] action_flags
2861  *   Holds the actions detected until now.
2862  * @param[in] vlan_m
2863  *   Pointer to the VLAN item mask from the pattern, NULL if not present.
2864  * @param[in] action
2865  *   Pointer to the action structure.
2866  * @param[in] attr
2867  *   Pointer to flow attributes
2868  * @param[out] error
2869  *   Pointer to error structure.
2870  *
2871  * @return
2872  *   0 on success, a negative errno value otherwise and rte_errno is set.
2873  */
2874 static int
2875 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2876                                   uint64_t action_flags,
2877                                   const struct rte_flow_item_vlan *vlan_m,
2878                                   const struct rte_flow_action *action,
2879                                   const struct rte_flow_attr *attr,
2880                                   struct rte_flow_error *error)
2881 {
2882         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2883         const struct mlx5_priv *priv = dev->data->dev_private;
2884
2885         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2886             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2887                 return rte_flow_error_set(error, EINVAL,
2888                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2889                                           "invalid vlan ethertype");
2890         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2891                 return rte_flow_error_set(error, EINVAL,
2892                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2893                                           "wrong action order, port_id should "
2894                                           "be after push VLAN");
2895         if (!attr->transfer && priv->representor)
2896                 return rte_flow_error_set(error, ENOTSUP,
2897                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2898                                           "push vlan action for VF representor "
2899                                           "not supported on NIC table");
2900         if (vlan_m &&
2901             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2902             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2903                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2904             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2905             !(mlx5_flow_find_action
2906                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2907                 return rte_flow_error_set(error, EINVAL,
2908                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2909                                           "not full match mask on VLAN PCP and "
2910                                           "there is no of_set_vlan_pcp action, "
2911                                           "push VLAN action cannot figure out "
2912                                           "PCP value");
2913         if (vlan_m &&
2914             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2915             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2916                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2917             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2918             !(mlx5_flow_find_action
2919                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2920                 return rte_flow_error_set(error, EINVAL,
2921                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2922                                           "not full match mask on VLAN VID and "
2923                                           "there is no of_set_vlan_vid action, "
2924                                           "push VLAN action cannot figure out "
2925                                           "VID value");
2927         return 0;
2928 }
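
/*
 * Editorial sketch (not compiled into the driver): when the pattern does
 * not fully match PCP and VID, the validation above expects explicit set
 * actions somewhere after the push. An ordering that satisfies it, with
 * illustrative VID/PCP values:
 *
 *	const struct rte_flow_action_of_push_vlan push = {
 *		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *	};
 *	const struct rte_flow_action_of_set_vlan_vid vid = {
 *		.vlan_vid = RTE_BE16(100),
 *	};
 *	const struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */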
2929
2930 /**
2931  * Validate the set VLAN PCP.
2932  *
2933  * @param[in] action_flags
2934  *   Holds the actions detected until now.
2935  * @param[in] actions
2936  *   Pointer to the list of actions remaining in the flow rule.
2937  * @param[out] error
2938  *   Pointer to error structure.
2939  *
2940  * @return
2941  *   0 on success, a negative errno value otherwise and rte_errno is set.
2942  */
2943 static int
2944 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2945                                      const struct rte_flow_action actions[],
2946                                      struct rte_flow_error *error)
2947 {
2948         const struct rte_flow_action *action = actions;
2949         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2950
2951         if (conf->vlan_pcp > 7)
2952                 return rte_flow_error_set(error, EINVAL,
2953                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2954                                           "VLAN PCP value is too big");
2955         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2956                 return rte_flow_error_set(error, ENOTSUP,
2957                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2958                                           "set VLAN PCP action must follow "
2959                                           "the push VLAN action");
2960         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2961                 return rte_flow_error_set(error, ENOTSUP,
2962                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2963                                           "Multiple VLAN PCP modification are "
2964                                           "not supported");
2965         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2966                 return rte_flow_error_set(error, EINVAL,
2967                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2968                                           "wrong action order, port_id should "
2969                                           "be after set VLAN PCP");
2970         return 0;
2971 }
2972
2973 /**
2974  * Validate the set VLAN VID.
2975  *
2976  * @param[in] item_flags
2977  *   Holds the items detected in this rule.
2978  * @param[in] action_flags
2979  *   Holds the actions detected until now.
2980  * @param[in] actions
2981  *   Pointer to the list of actions remaining in the flow rule.
2982  * @param[out] error
2983  *   Pointer to error structure.
2984  *
2985  * @return
2986  *   0 on success, a negative errno value otherwise and rte_errno is set.
2987  */
2988 static int
2989 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2990                                      uint64_t action_flags,
2991                                      const struct rte_flow_action actions[],
2992                                      struct rte_flow_error *error)
2993 {
2994         const struct rte_flow_action *action = actions;
2995         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2996
2997         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2998                 return rte_flow_error_set(error, EINVAL,
2999                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3000                                           "VLAN VID value is too big");
3001         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3002             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3003                 return rte_flow_error_set(error, ENOTSUP,
3004                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3005                                           "set VLAN VID action must follow push"
3006                                           " VLAN action or match on VLAN item");
3007         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3008                 return rte_flow_error_set(error, ENOTSUP,
3009                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3010                                           "Multiple VLAN VID modifications are "
3011                                           "not supported");
3012         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3013                 return rte_flow_error_set(error, EINVAL,
3014                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3015                                           "wrong action order, port_id should "
3016                                           "be after set VLAN VID");
3017         return 0;
3018 }
3019
3020 /**
3021  * Validate the FLAG action.
3022  *
3023  * @param[in] dev
3024  *   Pointer to the rte_eth_dev structure.
3025  * @param[in] action_flags
3026  *   Holds the actions detected until now.
3027  * @param[in] attr
3028  *   Pointer to flow attributes
3029  * @param[out] error
3030  *   Pointer to error structure.
3031  *
3032  * @return
3033  *   0 on success, a negative errno value otherwise and rte_errno is set.
3034  */
3035 static int
3036 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3037                              uint64_t action_flags,
3038                              const struct rte_flow_attr *attr,
3039                              struct rte_flow_error *error)
3040 {
3041         struct mlx5_priv *priv = dev->data->dev_private;
3042         struct mlx5_dev_config *config = &priv->config;
3043         int ret;
3044
3045         /* Fall back if no extended metadata register support. */
3046         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3047                 return mlx5_flow_validate_action_flag(action_flags, attr,
3048                                                       error);
3049         /* Extensive metadata mode requires registers. */
3050         if (!mlx5_flow_ext_mreg_supported(dev))
3051                 return rte_flow_error_set(error, ENOTSUP,
3052                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3053                                           "no metadata registers "
3054                                           "to support flag action");
3055         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3056                 return rte_flow_error_set(error, ENOTSUP,
3057                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3058                                           "extended metadata register"
3059                                           " isn't available");
3060         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3061         if (ret < 0)
3062                 return ret;
3063         MLX5_ASSERT(ret > 0);
3064         if (action_flags & MLX5_FLOW_ACTION_MARK)
3065                 return rte_flow_error_set(error, EINVAL,
3066                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3067                                           "can't mark and flag in same flow");
3068         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3069                 return rte_flow_error_set(error, EINVAL,
3070                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3071                                           "can't have 2 flag"
3072                                           " actions in same flow");
3073         return 0;
3074 }
3075
3076 /**
3077  * Validate MARK action.
3078  *
3079  * @param[in] dev
3080  *   Pointer to the rte_eth_dev structure.
3081  * @param[in] action
3082  *   Pointer to action.
3083  * @param[in] action_flags
3084  *   Holds the actions detected until now.
3085  * @param[in] attr
3086  *   Pointer to flow attributes
3087  * @param[out] error
3088  *   Pointer to error structure.
3089  *
3090  * @return
3091  *   0 on success, a negative errno value otherwise and rte_errno is set.
3092  */
3093 static int
3094 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3095                              const struct rte_flow_action *action,
3096                              uint64_t action_flags,
3097                              const struct rte_flow_attr *attr,
3098                              struct rte_flow_error *error)
3099 {
3100         struct mlx5_priv *priv = dev->data->dev_private;
3101         struct mlx5_dev_config *config = &priv->config;
3102         const struct rte_flow_action_mark *mark = action->conf;
3103         int ret;
3104
3105         if (is_tunnel_offload_active(dev))
3106                 return rte_flow_error_set(error, ENOTSUP,
3107                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3108                                           "no mark action "
3109                                           "if tunnel offload active");
3110         /* Fall back if no extended metadata register support. */
3111         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3112                 return mlx5_flow_validate_action_mark(action, action_flags,
3113                                                       attr, error);
3114         /* Extensive metadata mode requires registers. */
3115         if (!mlx5_flow_ext_mreg_supported(dev))
3116                 return rte_flow_error_set(error, ENOTSUP,
3117                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3118                                           "no metadata registers "
3119                                           "to support mark action");
3120         if (!priv->sh->dv_mark_mask)
3121                 return rte_flow_error_set(error, ENOTSUP,
3122                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3123                                           "extended metadata register"
3124                                           " isn't available");
3125         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3126         if (ret < 0)
3127                 return ret;
3128         MLX5_ASSERT(ret > 0);
3129         if (!mark)
3130                 return rte_flow_error_set(error, EINVAL,
3131                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3132                                           "configuration cannot be null");
3133         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3134                 return rte_flow_error_set(error, EINVAL,
3135                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3136                                           &mark->id,
3137                                           "mark id exceeds the limit");
3138         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3139                 return rte_flow_error_set(error, EINVAL,
3140                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3141                                           "can't flag and mark in same flow");
3142         if (action_flags & MLX5_FLOW_ACTION_MARK)
3143                 return rte_flow_error_set(error, EINVAL,
3144                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3145                                           "can't have 2 mark actions in same"
3146                                           " flow");
3147         return 0;
3148 }
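
/*
 * Editorial sketch (not compiled into the driver): in extensive metadata
 * mode the usable mark range is clipped by dv_mark_mask, so an id must
 * stay below MLX5_FLOW_MARK_MAX & dv_mark_mask. The id value is an
 * illustrative assumption:
 *
 *	const struct rte_flow_action_mark mark = { .id = 0xbef };
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_MARK,
 *		.conf = &mark,
 *	};
 */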
3149
3150 /**
3151  * Validate SET_META action.
3152  *
3153  * @param[in] dev
3154  *   Pointer to the rte_eth_dev structure.
3155  * @param[in] action
3156  *   Pointer to the action structure.
3157  * @param[in] action_flags
3158  *   Holds the actions detected until now.
3159  * @param[in] attr
3160  *   Pointer to flow attributes
3161  * @param[out] error
3162  *   Pointer to error structure.
3163  *
3164  * @return
3165  *   0 on success, a negative errno value otherwise and rte_errno is set.
3166  */
3167 static int
3168 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3169                                  const struct rte_flow_action *action,
3170                                  uint64_t action_flags __rte_unused,
3171                                  const struct rte_flow_attr *attr,
3172                                  struct rte_flow_error *error)
3173 {
3174         const struct rte_flow_action_set_meta *conf;
3175         uint32_t nic_mask = UINT32_MAX;
3176         int reg;
3177
3178         if (!mlx5_flow_ext_mreg_supported(dev))
3179                 return rte_flow_error_set(error, ENOTSUP,
3180                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3181                                           "extended metadata register"
3182                                           " isn't supported");
3183         reg = flow_dv_get_metadata_reg(dev, attr, error);
3184         if (reg < 0)
3185                 return reg;
3186         if (reg == REG_NON)
3187                 return rte_flow_error_set(error, ENOTSUP,
3188                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3189                                           "unavailable extended metadata register");
3190         if (reg != REG_A && reg != REG_B) {
3191                 struct mlx5_priv *priv = dev->data->dev_private;
3192
3193                 nic_mask = priv->sh->dv_meta_mask;
3194         }
3195         if (!(action->conf))
3196                 return rte_flow_error_set(error, EINVAL,
3197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3198                                           "configuration cannot be null");
3199         conf = (const struct rte_flow_action_set_meta *)action->conf;
3200         if (!conf->mask)
3201                 return rte_flow_error_set(error, EINVAL,
3202                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3203                                           "zero mask doesn't have any effect");
3204         if (conf->mask & ~nic_mask)
3205                 return rte_flow_error_set(error, EINVAL,
3206                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3207                                           "metadata must be within reg C0");
3208         return 0;
3209 }
3210
3211 /**
3212  * Validate SET_TAG action.
3213  *
3214  * @param[in] dev
3215  *   Pointer to the rte_eth_dev structure.
3216  * @param[in] action
3217  *   Pointer to the action structure.
3218  * @param[in] action_flags
3219  *   Holds the actions detected until now.
3220  * @param[in] attr
3221  *   Pointer to flow attributes
3222  * @param[out] error
3223  *   Pointer to error structure.
3224  *
3225  * @return
3226  *   0 on success, a negative errno value otherwise and rte_errno is set.
3227  */
3228 static int
3229 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3230                                 const struct rte_flow_action *action,
3231                                 uint64_t action_flags,
3232                                 const struct rte_flow_attr *attr,
3233                                 struct rte_flow_error *error)
3234 {
3235         const struct rte_flow_action_set_tag *conf;
3236         const uint64_t terminal_action_flags =
3237                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3238                 MLX5_FLOW_ACTION_RSS;
3239         int ret;
3240
3241         if (!mlx5_flow_ext_mreg_supported(dev))
3242                 return rte_flow_error_set(error, ENOTSUP,
3243                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3244                                           "extensive metadata register"
3245                                           " isn't supported");
3246         if (!(action->conf))
3247                 return rte_flow_error_set(error, EINVAL,
3248                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3249                                           "configuration cannot be null");
3250         conf = (const struct rte_flow_action_set_tag *)action->conf;
3251         if (!conf->mask)
3252                 return rte_flow_error_set(error, EINVAL,
3253                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3254                                           "zero mask doesn't have any effect");
3255         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3256         if (ret < 0)
3257                 return ret;
3258         if (!attr->transfer && attr->ingress &&
3259             (action_flags & terminal_action_flags))
3260                 return rte_flow_error_set(error, EINVAL,
3261                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3262                                           "set_tag has no effect"
3263                                           " with terminal actions");
3264         return 0;
3265 }
3266
3267 /**
3268  * Check if action counter is shared by either old or new mechanism.
3269  *
3270  * @param[in] action
3271  *   Pointer to the action structure.
3272  *
3273  * @return
3274  *   True when counter is shared, false otherwise.
3275  */
3276 static inline bool
3277 is_shared_action_count(const struct rte_flow_action *action)
3278 {
3279         const struct rte_flow_action_count *count =
3280                         (const struct rte_flow_action_count *)action->conf;
3281
3282         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3283                 return true;
3284         return !!(count && count->shared);
3285 }
3286
3287 /**
3288  * Validate count action.
3289  *
3290  * @param[in] dev
3291  *   Pointer to rte_eth_dev structure.
3292  * @param[in] shared
3293  *   Indicator if action is shared.
3294  * @param[in] action_flags
3295  *   Holds the actions detected until now.
3296  * @param[out] error
3297  *   Pointer to error structure.
3298  *
3299  * @return
3300  *   0 on success, a negative errno value otherwise and rte_errno is set.
3301  */
3302 static int
3303 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3304                               uint64_t action_flags,
3305                               struct rte_flow_error *error)
3306 {
3307         struct mlx5_priv *priv = dev->data->dev_private;
3308
3309         if (!priv->config.devx)
3310                 goto notsup_err;
3311         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3312                 return rte_flow_error_set(error, EINVAL,
3313                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3314                                           "duplicate count actions set");
3315         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3316             !priv->sh->flow_hit_aso_en)
3317                 return rte_flow_error_set(error, EINVAL,
3318                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3319                                           "old age and shared count combination is not supported");
3320 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3321         return 0;
3322 #endif
3323 notsup_err:
3324         return rte_flow_error_set
3325                       (error, ENOTSUP,
3326                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3327                        NULL,
3328                        "count action not supported");
3329 }
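
/*
 * Editorial sketch (not compiled into the driver): the "shared" flag
 * checked above comes either from the legacy bit in the count
 * configuration, as below, or from the indirect action mechanism. The
 * counter id is an illustrative assumption:
 *
 *	const struct rte_flow_action_count count = {
 *		.shared = 1,
 *		.id = 42,
 *	};
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		.conf = &count,
 *	};
 */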
3330
3331 /**
3332  * Validate the L2 encap action.
3333  *
3334  * @param[in] dev
3335  *   Pointer to the rte_eth_dev structure.
3336  * @param[in] action_flags
3337  *   Holds the actions detected until now.
3338  * @param[in] action
3339  *   Pointer to the action structure.
3340  * @param[in] attr
3341  *   Pointer to flow attributes.
3342  * @param[out] error
3343  *   Pointer to error structure.
3344  *
3345  * @return
3346  *   0 on success, a negative errno value otherwise and rte_errno is set.
3347  */
3348 static int
3349 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3350                                  uint64_t action_flags,
3351                                  const struct rte_flow_action *action,
3352                                  const struct rte_flow_attr *attr,
3353                                  struct rte_flow_error *error)
3354 {
3355         const struct mlx5_priv *priv = dev->data->dev_private;
3356
3357         if (!(action->conf))
3358                 return rte_flow_error_set(error, EINVAL,
3359                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3360                                           "configuration cannot be null");
3361         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3362                 return rte_flow_error_set(error, EINVAL,
3363                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3364                                           "can only have a single encap action "
3365                                           "in a flow");
3366         if (!attr->transfer && priv->representor)
3367                 return rte_flow_error_set(error, ENOTSUP,
3368                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3369                                           "encap action for VF representor "
3370                                           "not supported on NIC table");
3371         return 0;
3372 }
3373
3374 /**
3375  * Validate a decap action.
3376  *
3377  * @param[in] dev
3378  *   Pointer to the rte_eth_dev structure.
3379  * @param[in] action_flags
3380  *   Holds the actions detected until now.
3381  * @param[in] action
3382  *   Pointer to the action structure.
3383  * @param[in] item_flags
3384  *   Holds the items detected.
3385  * @param[in] attr
3386  *   Pointer to flow attributes
3387  * @param[out] error
3388  *   Pointer to error structure.
3389  *
3390  * @return
3391  *   0 on success, a negative errno value otherwise and rte_errno is set.
3392  */
3393 static int
3394 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3395                               uint64_t action_flags,
3396                               const struct rte_flow_action *action,
3397                               const uint64_t item_flags,
3398                               const struct rte_flow_attr *attr,
3399                               struct rte_flow_error *error)
3400 {
3401         const struct mlx5_priv *priv = dev->data->dev_private;
3402
3403         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3404             !priv->config.decap_en)
3405                 return rte_flow_error_set(error, ENOTSUP,
3406                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3407                                           "decap is not enabled");
3408         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3409                 return rte_flow_error_set(error, ENOTSUP,
3410                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3411                                           action_flags &
3412                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3413                                           "have a single decap action" : "decap "
3414                                           "after encap is not supported");
3415         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3416                 return rte_flow_error_set(error, EINVAL,
3417                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3418                                           "can't have decap action after"
3419                                           " modify action");
3420         if (attr->egress)
3421                 return rte_flow_error_set(error, ENOTSUP,
3422                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3423                                           NULL,
3424                                           "decap action not supported for "
3425                                           "egress");
3426         if (!attr->transfer && priv->representor)
3427                 return rte_flow_error_set(error, ENOTSUP,
3428                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3429                                           "decap action for VF representor "
3430                                           "not supported on NIC table");
3431         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3432             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3433                 return rte_flow_error_set(error, ENOTSUP,
3434                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3435                                 "VXLAN item should be present for VXLAN decap");
3436         return 0;
3437 }
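
/*
 * Editorial sketch (not compiled into the driver): VXLAN decap is only
 * accepted when the pattern actually matched a VXLAN header, so a rule
 * pairs the item and the action:
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */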
3438
3439 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3440
3441 /**
3442  * Validate the raw encap and decap actions.
3443  *
3444  * @param[in] dev
3445  *   Pointer to the rte_eth_dev structure.
3446  * @param[in] decap
3447  *   Pointer to the decap action.
3448  * @param[in] encap
3449  *   Pointer to the encap action.
3450  * @param[in] attr
3451  *   Pointer to flow attributes
3452  * @param[in, out] action_flags
3453  *   Holds the actions detected until now.
3454  * @param[out] actions_n
3455  *   pointer to the number of actions counter.
3456  * @param[in] action
3457  *   Pointer to the action structure.
3458  * @param[in] item_flags
3459  *   Holds the items detected.
3460  * @param[out] error
3461  *   Pointer to error structure.
3462  *
3463  * @return
3464  *   0 on success, a negative errno value otherwise and rte_errno is set.
3465  */
3466 static int
3467 flow_dv_validate_action_raw_encap_decap
3468         (struct rte_eth_dev *dev,
3469          const struct rte_flow_action_raw_decap *decap,
3470          const struct rte_flow_action_raw_encap *encap,
3471          const struct rte_flow_attr *attr, uint64_t *action_flags,
3472          int *actions_n, const struct rte_flow_action *action,
3473          uint64_t item_flags, struct rte_flow_error *error)
3474 {
3475         const struct mlx5_priv *priv = dev->data->dev_private;
3476         int ret;
3477
3478         if (encap && (!encap->size || !encap->data))
3479                 return rte_flow_error_set(error, EINVAL,
3480                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3481                                           "raw encap data cannot be empty");
3482         if (decap && encap) {
3483                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3484                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3485                         /* L3 encap. */
3486                         decap = NULL;
3487                 else if (encap->size <=
3488                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3489                            decap->size >
3490                            MLX5_ENCAPSULATION_DECISION_SIZE)
3491                         /* L3 decap. */
3492                         encap = NULL;
3493                 else if (encap->size >
3494                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3495                            decap->size >
3496                            MLX5_ENCAPSULATION_DECISION_SIZE)
3497                         /* 2 L2 actions: encap and decap. */
3498                         ;
3499                 else
3500                         return rte_flow_error_set(error,
3501                                 ENOTSUP,
3502                                 RTE_FLOW_ERROR_TYPE_ACTION,
3503                                 NULL, "raw decap and raw encap "
3504                                 "sizes are both too small, "
3505                                 "combination not supported");
3506         }
3507         if (decap) {
3508                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3509                                                     item_flags, attr, error);
3510                 if (ret < 0)
3511                         return ret;
3512                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3513                 ++(*actions_n);
3514         }
3515         if (encap) {
3516                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3517                         return rte_flow_error_set(error, ENOTSUP,
3518                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3519                                                   NULL,
3520                                                   "small raw encap size");
3521                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3522                         return rte_flow_error_set(error, EINVAL,
3523                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3524                                                   NULL,
3525                                                   "more than one encap action");
3526                 if (!attr->transfer && priv->representor)
3527                         return rte_flow_error_set
3528                                         (error, ENOTSUP,
3529                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3530                                          "encap action for VF representor "
3531                                          "not supported on NIC table");
3532                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3533                 ++(*actions_n);
3534         }
3535         return 0;
3536 }
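
/*
 * Editorial sketch of the size decision above: buffer sizes are compared
 * against MLX5_ENCAPSULATION_DECISION_SIZE to tell L2 from L3 reformats.
 * A hypothetical helper, for illustration only:
 *
 *	static inline bool
 *	mlx5_refmt_is_l3(size_t size)
 *	{
 *		return size <= MLX5_ENCAPSULATION_DECISION_SIZE;
 *	}
 *
 * When the decap buffer is "L3" and the encap buffer is not, the pair
 * collapses into a single L3 encap; the mirrored case collapses into a
 * single L3 decap; two large buffers remain separate L2 decap and L2
 * encap actions, and two small buffers are rejected.
 */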
3537
3538 /**
3539  * Validate the ASO CT action.
3540  *
3541  * @param[in] dev
3542  *   Pointer to the rte_eth_dev structure.
3543  * @param[in] action_flags
3544  *   Holds the actions detected until now.
3545  * @param[in] item_flags
3546  *   The items found in this flow rule.
3547  * @param[in] attr
3548  *   Pointer to flow attributes.
3549  * @param[out] error
3550  *   Pointer to error structure.
3551  *
3552  * @return
3553  *   0 on success, a negative errno value otherwise and rte_errno is set.
3554  */
3555 static int
3556 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3557                                uint64_t action_flags,
3558                                uint64_t item_flags,
3559                                const struct rte_flow_attr *attr,
3560                                struct rte_flow_error *error)
3561 {
3562         RTE_SET_USED(dev);
3563
3564         if (attr->group == 0 && !attr->transfer)
3565                 return rte_flow_error_set(error, ENOTSUP,
3566                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3567                                           NULL,
3568                                           "only non-root tables are supported");
3569         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3570                 return rte_flow_error_set(error, ENOTSUP,
3571                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3572                                           "CT cannot follow a fate action");
3573         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3574             (action_flags & MLX5_FLOW_ACTION_AGE))
3575                 return rte_flow_error_set(error, EINVAL,
3576                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3577                                           "Only one ASO action is supported");
3578         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3579                 return rte_flow_error_set(error, EINVAL,
3580                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3581                                           "Encap cannot exist before CT");
3582         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3583                 return rte_flow_error_set(error, EINVAL,
3584                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3585                                           "Not an outer TCP packet");
3586         return 0;
3587 }
3588
3589 int
3590 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3591                              struct mlx5_list_entry *entry, void *cb_ctx)
3592 {
3593         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3594         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3595         struct mlx5_flow_dv_encap_decap_resource *resource;
3596
3597         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3598                                 entry);
3599         if (resource->reformat_type == ctx_resource->reformat_type &&
3600             resource->ft_type == ctx_resource->ft_type &&
3601             resource->flags == ctx_resource->flags &&
3602             resource->size == ctx_resource->size &&
3603             !memcmp((const void *)resource->buf,
3604                     (const void *)ctx_resource->buf,
3605                     resource->size))
3606                 return 0;
3607         return -1;
3608 }
3609
3610 struct mlx5_list_entry *
3611 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3612 {
3613         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3614         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3615         struct mlx5dv_dr_domain *domain;
3616         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3617         struct mlx5_flow_dv_encap_decap_resource *resource;
3618         uint32_t idx;
3619         int ret;
3620
3621         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3622                 domain = sh->fdb_domain;
3623         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3624                 domain = sh->rx_domain;
3625         else
3626                 domain = sh->tx_domain;
3627         /* Register new encap/decap resource. */
3628         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3629         if (!resource) {
3630                 rte_flow_error_set(ctx->error, ENOMEM,
3631                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3632                                    "cannot allocate resource memory");
3633                 return NULL;
3634         }
3635         *resource = *ctx_resource;
3636         resource->idx = idx;
3637         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
3638                                                               resource,
3639                                                              &resource->action);
3640         if (ret) {
3641                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3642                 rte_flow_error_set(ctx->error, ENOMEM,
3643                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3644                                    NULL, "cannot create action");
3645                 return NULL;
3646         }
3647
3648         return &resource->entry;
3649 }
3650
3651 struct mlx5_list_entry *
3652 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3653                              void *cb_ctx)
3654 {
3655         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3656         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3657         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3658         uint32_t idx;
3659
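             /*
              * Clone the entry for the list's per-lcore cache: same
              * content, only the ipool index differs.
              */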
3660         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3661                                            &idx);
3662         if (!cache_resource) {
3663                 rte_flow_error_set(ctx->error, ENOMEM,
3664                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3665                                    "cannot allocate resource memory");
3666                 return NULL;
3667         }
3668         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3669         cache_resource->idx = idx;
3670         return &cache_resource->entry;
3671 }
3672
3673 void
3674 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3675 {
3676         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3677         struct mlx5_flow_dv_encap_decap_resource *res =
3678                                        container_of(entry, typeof(*res), entry);
3679
3680         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3681 }
3682
3683 /**
3684  * Find existing encap/decap resource or create and register a new one.
3685  *
3686  * @param[in, out] dev
3687  *   Pointer to rte_eth_dev structure.
3688  * @param[in, out] resource
3689  *   Pointer to encap/decap resource.
3690  * @param[in, out] dev_flow
3691  *   Pointer to the dev_flow.
3692  * @param[out] error
3693  *   Pointer to error structure.
3694  *
3695  * @return
3696  *   0 on success, otherwise -errno and errno is set.
3697  */
3698 static int
3699 flow_dv_encap_decap_resource_register
3700                         (struct rte_eth_dev *dev,
3701                          struct mlx5_flow_dv_encap_decap_resource *resource,
3702                          struct mlx5_flow *dev_flow,
3703                          struct rte_flow_error *error)
3704 {
3705         struct mlx5_priv *priv = dev->data->dev_private;
3706         struct mlx5_dev_ctx_shared *sh = priv->sh;
3707         struct mlx5_list_entry *entry;
3708         union {
3709                 struct {
3710                         uint32_t ft_type:8;
3711                         uint32_t refmt_type:8;
3712                         /*
3713                          * Header reformat actions can be shared between
3714                          * non-root tables. One bit indicates whether the
3715                          * table is non-root.
3716                          */
3717                         uint32_t is_root:1;
3718                         uint32_t reserve:15;
3719                 };
3720                 uint32_t v32;
3721         } encap_decap_key = {
3722                 {
3723                         .ft_type = resource->ft_type,
3724                         .refmt_type = resource->reformat_type,
3725                         .is_root = !!dev_flow->dv.group,
3726                         .reserve = 0,
3727                 }
3728         };
3729         struct mlx5_flow_cb_ctx ctx = {
3730                 .error = error,
3731                 .data = resource,
3732         };
3733         uint64_t key64;
3734
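             /* Group 0 is the root table, which needs the root-level action flag. */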
3735         resource->flags = dev_flow->dv.group ? 0 : 1;
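             /*
              * The hash key is a raw checksum over the packed key fields,
              * chained with the reformat buffer whenever the header data
              * takes part in matching (i.e. anything but plain L2 decap).
              */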
3736         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3737                                 sizeof(encap_decap_key.v32), 0);
3738         if (resource->reformat_type !=
3739             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3740             resource->size)
3741                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3742         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3743         if (!entry)
3744                 return -rte_errno;
3745         resource = container_of(entry, typeof(*resource), entry);
3746         dev_flow->dv.encap_decap = resource;
3747         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3748         return 0;
3749 }
3750
3751 /**
3752  * Find existing table jump resource or create and register a new one.
3753  *
3754  * @param[in, out] dev
3755  *   Pointer to rte_eth_dev structure.
3756  * @param[in, out] tbl
3757  *   Pointer to flow table resource.
3758  * @param[in, out] dev_flow
3759  *   Pointer to the dev_flow.
3760  * @param[out] error
3761  *   Pointer to error structure.
3762  *
3763  * @return
3764  *   0 on success, otherwise -errno and errno is set.
3765  */
3766 static int
3767 flow_dv_jump_tbl_resource_register
3768                         (struct rte_eth_dev *dev __rte_unused,
3769                          struct mlx5_flow_tbl_resource *tbl,
3770                          struct mlx5_flow *dev_flow,
3771                          struct rte_flow_error *error __rte_unused)
3772 {
3773         struct mlx5_flow_tbl_data_entry *tbl_data =
3774                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3775
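             /*
              * The jump action was created together with the table;
              * just reference it here.
              */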
3776         MLX5_ASSERT(tbl);
3777         MLX5_ASSERT(tbl_data->jump.action);
3778         dev_flow->handle->rix_jump = tbl_data->idx;
3779         dev_flow->dv.jump = &tbl_data->jump;
3780         return 0;
3781 }
3782
3783 int
3784 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3785                          struct mlx5_list_entry *entry, void *cb_ctx)
3786 {
3787         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3788         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3789         struct mlx5_flow_dv_port_id_action_resource *res =
3790                                        container_of(entry, typeof(*res), entry);
3791
3792         return ref->port_id != res->port_id;
3793 }
3794
3795 struct mlx5_list_entry *
3796 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3797 {
3798         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3799         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3800         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3801         struct mlx5_flow_dv_port_id_action_resource *resource;
3802         uint32_t idx;
3803         int ret;
3804
3805         /* Register new port id action resource. */
3806         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3807         if (!resource) {
3808                 rte_flow_error_set(ctx->error, ENOMEM,
3809                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3810                                    "cannot allocate port_id action memory");
3811                 return NULL;
3812         }
3813         *resource = *ref;
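             /*
              * The destination port action exists only in the FDB
              * (E-Switch) domain.
              */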
3814         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3815                                                         ref->port_id,
3816                                                         &resource->action);
3817         if (ret) {
3818                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3819                 rte_flow_error_set(ctx->error, ENOMEM,
3820                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3821                                    "cannot create action");
3822                 return NULL;
3823         }
3824         resource->idx = idx;
3825         return &resource->entry;
3826 }
3827
3828 struct mlx5_list_entry *
3829 flow_dv_port_id_clone_cb(void *tool_ctx,
3830                          struct mlx5_list_entry *entry __rte_unused,
3831                          void *cb_ctx)
3832 {
3833         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3834         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3835         struct mlx5_flow_dv_port_id_action_resource *resource;
3836         uint32_t idx;
3837
3838         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3839         if (!resource) {
3840                 rte_flow_error_set(ctx->error, ENOMEM,
3841                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3842                                    "cannot allocate port_id action memory");
3843                 return NULL;
3844         }
3845         memcpy(resource, entry, sizeof(*resource));
3846         resource->idx = idx;
3847         return &resource->entry;
3848 }
3849
3850 void
3851 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3852 {
3853         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3854         struct mlx5_flow_dv_port_id_action_resource *resource =
3855                                   container_of(entry, typeof(*resource), entry);
3856
3857         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3858 }
3859
3860 /**
3861  * Find existing table port ID resource or create and register a new one.
3862  *
3863  * @param[in, out] dev
3864  *   Pointer to rte_eth_dev structure.
3865  * @param[in, out] ref
3866  *   Pointer to port ID action resource reference.
3867  * @param[in, out] dev_flow
3868  *   Pointer to the dev_flow.
3869  * @param[out] error
3870  *   Pointer to error structure.
3871  *
3872  * @return
3873  *   0 on success, otherwise -errno and errno is set.
3874  */
3875 static int
3876 flow_dv_port_id_action_resource_register
3877                         (struct rte_eth_dev *dev,
3878                          struct mlx5_flow_dv_port_id_action_resource *ref,
3879                          struct mlx5_flow *dev_flow,
3880                          struct rte_flow_error *error)
3881 {
3882         struct mlx5_priv *priv = dev->data->dev_private;
3883         struct mlx5_list_entry *entry;
3884         struct mlx5_flow_dv_port_id_action_resource *resource;
3885         struct mlx5_flow_cb_ctx ctx = {
3886                 .error = error,
3887                 .data = ref,
3888         };
3889
3890         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3891         if (!entry)
3892                 return -rte_errno;
3893         resource = container_of(entry, typeof(*resource), entry);
3894         dev_flow->dv.port_id_action = resource;
3895         dev_flow->handle->rix_port_id_action = resource->idx;
3896         return 0;
3897 }
3898
3899 int
3900 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3901                            struct mlx5_list_entry *entry, void *cb_ctx)
3902 {
3903         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3904         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3905         struct mlx5_flow_dv_push_vlan_action_resource *res =
3906                                        container_of(entry, typeof(*res), entry);
3907
3908         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3909 }
3910
3911 struct mlx5_list_entry *
3912 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3913 {
3914         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3915         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3916         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3917         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3918         struct mlx5dv_dr_domain *domain;
3919         uint32_t idx;
3920         int ret;
3921
3922         /* Register new push VLAN action resource. */
3923         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3924         if (!resource) {
3925                 rte_flow_error_set(ctx->error, ENOMEM,
3926                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3927                                    "cannot allocate push_vlan action memory");
3928                 return NULL;
3929         }
3930         *resource = *ref;
3931         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3932                 domain = sh->fdb_domain;
3933         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3934                 domain = sh->rx_domain;
3935         else
3936                 domain = sh->tx_domain;
3937         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3938                                                         &resource->action);
3939         if (ret) {
3940                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3941                 rte_flow_error_set(ctx->error, ENOMEM,
3942                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3943                                    "cannot create push vlan action");
3944                 return NULL;
3945         }
3946         resource->idx = idx;
3947         return &resource->entry;
3948 }
3949
3950 struct mlx5_list_entry *
3951 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3952                            struct mlx5_list_entry *entry __rte_unused,
3953                            void *cb_ctx)
3954 {
3955         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3956         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3957         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3958         uint32_t idx;
3959
3960         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3961         if (!resource) {
3962                 rte_flow_error_set(ctx->error, ENOMEM,
3963                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3964                                    "cannot allocate push_vlan action memory");
3965                 return NULL;
3966         }
3967         memcpy(resource, entry, sizeof(*resource));
3968         resource->idx = idx;
3969         return &resource->entry;
3970 }
3971
3972 void
3973 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3974 {
3975         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3976         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3977                                   container_of(entry, typeof(*resource), entry);
3978
3979         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3980 }
3981
3982 /**
3983  * Find existing push vlan resource or create and register a new one.
3984  *
3985  * @param[in, out] dev
3986  *   Pointer to rte_eth_dev structure.
3987  * @param[in, out] ref
3988  *   Pointer to push VLAN action resource reference.
3989  * @param[in, out] dev_flow
3990  *   Pointer to the dev_flow.
3991  * @param[out] error
3992  *   Pointer to error structure.
3993  *
3994  * @return
3995  *   0 on success, otherwise -errno and errno is set.
3996  */
3997 static int
3998 flow_dv_push_vlan_action_resource_register
3999                        (struct rte_eth_dev *dev,
4000                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
4001                         struct mlx5_flow *dev_flow,
4002                         struct rte_flow_error *error)
4003 {
4004         struct mlx5_priv *priv = dev->data->dev_private;
4005         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4006         struct mlx5_list_entry *entry;
4007         struct mlx5_flow_cb_ctx ctx = {
4008                 .error = error,
4009                 .data = ref,
4010         };
4011
4012         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4013         if (!entry)
4014                 return -rte_errno;
4015         resource = container_of(entry, typeof(*resource), entry);
4016
4017         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4018         dev_flow->dv.push_vlan_res = resource;
4019         return 0;
4020 }
4021
4022 /**
4023  * Get the header length of a specific rte_flow_item_type.
4024  *
4025  * @param[in] item_type
4026  *   Tested rte_flow_item_type.
4027  *
4028  * @return
4029  *   Size of the corresponding protocol header, 0 if void or irrelevant.
4030  */
4031 static size_t
4032 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4033 {
4034         size_t retval;
4035
4036         switch (item_type) {
4037         case RTE_FLOW_ITEM_TYPE_ETH:
4038                 retval = sizeof(struct rte_ether_hdr);
4039                 break;
4040         case RTE_FLOW_ITEM_TYPE_VLAN:
4041                 retval = sizeof(struct rte_vlan_hdr);
4042                 break;
4043         case RTE_FLOW_ITEM_TYPE_IPV4:
4044                 retval = sizeof(struct rte_ipv4_hdr);
4045                 break;
4046         case RTE_FLOW_ITEM_TYPE_IPV6:
4047                 retval = sizeof(struct rte_ipv6_hdr);
4048                 break;
4049         case RTE_FLOW_ITEM_TYPE_UDP:
4050                 retval = sizeof(struct rte_udp_hdr);
4051                 break;
4052         case RTE_FLOW_ITEM_TYPE_TCP:
4053                 retval = sizeof(struct rte_tcp_hdr);
4054                 break;
4055         case RTE_FLOW_ITEM_TYPE_VXLAN:
4056         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4057                 retval = sizeof(struct rte_vxlan_hdr);
4058                 break;
4059         case RTE_FLOW_ITEM_TYPE_GRE:
4060         case RTE_FLOW_ITEM_TYPE_NVGRE:
4061                 retval = sizeof(struct rte_gre_hdr);
4062                 break;
4063         case RTE_FLOW_ITEM_TYPE_MPLS:
4064                 retval = sizeof(struct rte_mpls_hdr);
4065                 break;
4066         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4067         default:
4068                 retval = 0;
4069                 break;
4070         }
4071         return retval;
4072 }
4073
4074 #define MLX5_ENCAP_IPV4_VERSION         0x40
4075 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4076 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4077 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4078 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4079 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4080 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4081
4082 /**
4083  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
4084  *
4085  * @param[in] items
4086  *   Pointer to the list of rte_flow_item objects.
4087  * @param[out] buf
4088  *   Pointer to the output buffer.
4089  * @param[out] size
4090  *   Pointer to the output buffer size.
4091  * @param[out] error
4092  *   Pointer to the error structure.
4093  *
4094  * @return
4095  *   0 on success, a negative errno value otherwise and rte_errno is set.
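      *
      * A minimal usage sketch (illustrative only, not from a real caller):
      *
      *   struct rte_flow_item items[] = {
      *           { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
      *           { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
      *           { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
      *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
      *           { .type = RTE_FLOW_ITEM_TYPE_END, },
      *   };
      *
      * is flattened into a 14 + 20 + 8 + 8 = 50 byte raw header, with any
      * zeroed protocol fields (ether_type, next_proto_id, dst_port,
      * vx_flags) filled in with the defaults handled below.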
4096  */
4097 static int
4098 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4099                            size_t *size, struct rte_flow_error *error)
4100 {
4101         struct rte_ether_hdr *eth = NULL;
4102         struct rte_vlan_hdr *vlan = NULL;
4103         struct rte_ipv4_hdr *ipv4 = NULL;
4104         struct rte_ipv6_hdr *ipv6 = NULL;
4105         struct rte_udp_hdr *udp = NULL;
4106         struct rte_vxlan_hdr *vxlan = NULL;
4107         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4108         struct rte_gre_hdr *gre = NULL;
4109         size_t len;
4110         size_t temp_size = 0;
4111
4112         if (!items)
4113                 return rte_flow_error_set(error, EINVAL,
4114                                           RTE_FLOW_ERROR_TYPE_ACTION,
4115                                           NULL, "invalid empty data");
4116         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4117                 len = flow_dv_get_item_hdr_len(items->type);
4118                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4119                         return rte_flow_error_set(error, EINVAL,
4120                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4121                                                   (void *)items->type,
4122                                                   "items total size is too big"
4123                                                   " for encap action");
4124                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4125                 switch (items->type) {
4126                 case RTE_FLOW_ITEM_TYPE_ETH:
4127                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4128                         break;
4129                 case RTE_FLOW_ITEM_TYPE_VLAN:
4130                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4131                         if (!eth)
4132                                 return rte_flow_error_set(error, EINVAL,
4133                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4134                                                 (void *)items->type,
4135                                                 "eth header not found");
4136                         if (!eth->ether_type)
4137                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4138                         break;
4139                 case RTE_FLOW_ITEM_TYPE_IPV4:
4140                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4141                         if (!vlan && !eth)
4142                                 return rte_flow_error_set(error, EINVAL,
4143                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4144                                                 (void *)items->type,
4145                                                 "neither eth nor vlan"
4146                                                 " header found");
4147                         if (vlan && !vlan->eth_proto)
4148                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4149                         else if (eth && !eth->ether_type)
4150                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4151                         if (!ipv4->version_ihl)
4152                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4153                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4154                         if (!ipv4->time_to_live)
4155                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4156                         break;
4157                 case RTE_FLOW_ITEM_TYPE_IPV6:
4158                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4159                         if (!vlan && !eth)
4160                                 return rte_flow_error_set(error, EINVAL,
4161                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4162                                                 (void *)items->type,
4163                                                 "neither eth nor vlan"
4164                                                 " header found");
4165                         if (vlan && !vlan->eth_proto)
4166                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4167                         else if (eth && !eth->ether_type)
4168                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4169                         if (!ipv6->vtc_flow)
4170                                 ipv6->vtc_flow =
4171                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4172                         if (!ipv6->hop_limits)
4173                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4174                         break;
4175                 case RTE_FLOW_ITEM_TYPE_UDP:
4176                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4177                         if (!ipv4 && !ipv6)
4178                                 return rte_flow_error_set(error, EINVAL,
4179                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4180                                                 (void *)items->type,
4181                                                 "ip header not found");
4182                         if (ipv4 && !ipv4->next_proto_id)
4183                                 ipv4->next_proto_id = IPPROTO_UDP;
4184                         else if (ipv6 && !ipv6->proto)
4185                                 ipv6->proto = IPPROTO_UDP;
4186                         break;
4187                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4188                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4189                         if (!udp)
4190                                 return rte_flow_error_set(error, EINVAL,
4191                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4192                                                 (void *)items->type,
4193                                                 "udp header not found");
4194                         if (!udp->dst_port)
4195                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4196                         if (!vxlan->vx_flags)
4197                                 vxlan->vx_flags =
4198                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4199                         break;
4200                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4201                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4202                         if (!udp)
4203                                 return rte_flow_error_set(error, EINVAL,
4204                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4205                                                 (void *)items->type,
4206                                                 "udp header not found");
4207                         if (!vxlan_gpe->proto)
4208                                 return rte_flow_error_set(error, EINVAL,
4209                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4210                                                 (void *)items->type,
4211                                                 "next protocol not found");
4212                         if (!udp->dst_port)
4213                                 udp->dst_port =
4214                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4215                         if (!vxlan_gpe->vx_flags)
4216                                 vxlan_gpe->vx_flags =
4217                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4218                         break;
4219                 case RTE_FLOW_ITEM_TYPE_GRE:
4220                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4221                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4222                         if (!gre->proto)
4223                                 return rte_flow_error_set(error, EINVAL,
4224                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4225                                                 (void *)items->type,
4226                                                 "next protocol not found");
4227                         if (!ipv4 && !ipv6)
4228                                 return rte_flow_error_set(error, EINVAL,
4229                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4230                                                 (void *)items->type,
4231                                                 "ip header not found");
4232                         if (ipv4 && !ipv4->next_proto_id)
4233                                 ipv4->next_proto_id = IPPROTO_GRE;
4234                         else if (ipv6 && !ipv6->proto)
4235                                 ipv6->proto = IPPROTO_GRE;
4236                         break;
4237                 case RTE_FLOW_ITEM_TYPE_VOID:
4238                         break;
4239                 default:
4240                         return rte_flow_error_set(error, EINVAL,
4241                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4242                                                   (void *)items->type,
4243                                                   "unsupported item type");
4244                         break;
4245                 }
4246                 temp_size += len;
4247         }
4248         *size = temp_size;
4249         return 0;
4250 }
4251
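     /**
      * Zero the checksum of the outer UDP header in the encap data when
      * it follows an IPv6 header. The HW computes the IPv4 header
      * checksum itself, and for UDP over IPv6 a zero checksum is used,
      * as permitted for tunnels (RFC 6935).
      */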
4252 static int
4253 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4254 {
4255         struct rte_ether_hdr *eth = NULL;
4256         struct rte_vlan_hdr *vlan = NULL;
4257         struct rte_ipv6_hdr *ipv6 = NULL;
4258         struct rte_udp_hdr *udp = NULL;
4259         char *next_hdr;
4260         uint16_t proto;
4261
4262         eth = (struct rte_ether_hdr *)data;
4263         next_hdr = (char *)(eth + 1);
4264         proto = RTE_BE16(eth->ether_type);
4265
4266         /* VLAN skipping */
4267         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4268                 vlan = (struct rte_vlan_hdr *)next_hdr;
4269                 proto = RTE_BE16(vlan->eth_proto);
4270                 next_hdr += sizeof(struct rte_vlan_hdr);
4271         }
4272
4273         /* HW calculates the IPv4 checksum; no need to proceed. */
4274         if (proto == RTE_ETHER_TYPE_IPV4)
4275                 return 0;
4276
4277         /* Non-IPv4/IPv6 header, not supported. */
4278         if (proto != RTE_ETHER_TYPE_IPV6) {
4279                 return rte_flow_error_set(error, ENOTSUP,
4280                                           RTE_FLOW_ERROR_TYPE_ACTION,
4281                                           NULL, "Cannot offload non IPv4/IPv6");
4282         }
4283
4284         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4285
4286         /* Ignore non-UDP packets. */
4287         if (ipv6->proto != IPPROTO_UDP)
4288                 return 0;
4289
4290         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4291         udp->dgram_cksum = 0;
4292
4293         return 0;
4294 }
4295
4296 /**
4297  * Convert L2 encap action to DV specification.
4298  *
4299  * @param[in] dev
4300  *   Pointer to rte_eth_dev structure.
4301  * @param[in] action
4302  *   Pointer to action structure.
4303  * @param[in, out] dev_flow
4304  *   Pointer to the mlx5_flow.
4305  * @param[in] transfer
4306  *   Mark if the flow is E-Switch flow.
4307  * @param[out] error
4308  *   Pointer to the error structure.
4309  *
4310  * @return
4311  *   0 on success, a negative errno value otherwise and rte_errno is set.
4312  */
4313 static int
4314 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4315                                const struct rte_flow_action *action,
4316                                struct mlx5_flow *dev_flow,
4317                                uint8_t transfer,
4318                                struct rte_flow_error *error)
4319 {
4320         const struct rte_flow_item *encap_data;
4321         const struct rte_flow_action_raw_encap *raw_encap_data;
4322         struct mlx5_flow_dv_encap_decap_resource res = {
4323                 .reformat_type =
4324                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4325                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4326                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4327         };
4328
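             /*
              * RAW_ENCAP supplies a prebuilt header buffer; VXLAN/NVGRE
              * encap definitions are converted item by item instead.
              */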
4329         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4330                 raw_encap_data =
4331                         (const struct rte_flow_action_raw_encap *)action->conf;
4332                 res.size = raw_encap_data->size;
4333                 memcpy(res.buf, raw_encap_data->data, res.size);
4334         } else {
4335                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4336                         encap_data =
4337                                 ((const struct rte_flow_action_vxlan_encap *)
4338                                                 action->conf)->definition;
4339                 else
4340                         encap_data =
4341                                 ((const struct rte_flow_action_nvgre_encap *)
4342                                                 action->conf)->definition;
4343                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4344                                                &res.size, error))
4345                         return -rte_errno;
4346         }
4347         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4348                 return -rte_errno;
4349         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4350                 return rte_flow_error_set(error, EINVAL,
4351                                           RTE_FLOW_ERROR_TYPE_ACTION,
4352                                           NULL, "can't create L2 encap action");
4353         return 0;
4354 }
4355
4356 /**
4357  * Convert L2 decap action to DV specification.
4358  *
4359  * @param[in] dev
4360  *   Pointer to rte_eth_dev structure.
4361  * @param[in, out] dev_flow
4362  *   Pointer to the mlx5_flow.
4363  * @param[in] transfer
4364  *   Mark if the flow is E-Switch flow.
4365  * @param[out] error
4366  *   Pointer to the error structure.
4367  *
4368  * @return
4369  *   0 on success, a negative errno value otherwise and rte_errno is set.
4370  */
4371 static int
4372 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4373                                struct mlx5_flow *dev_flow,
4374                                uint8_t transfer,
4375                                struct rte_flow_error *error)
4376 {
4377         struct mlx5_flow_dv_encap_decap_resource res = {
4378                 .size = 0,
4379                 .reformat_type =
4380                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4381                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4382                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4383         };
4384
4385         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4386                 return rte_flow_error_set(error, EINVAL,
4387                                           RTE_FLOW_ERROR_TYPE_ACTION,
4388                                           NULL, "can't create L2 decap action");
4389         return 0;
4390 }
4391
4392 /**
4393  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4394  *
4395  * @param[in] dev
4396  *   Pointer to rte_eth_dev structure.
4397  * @param[in] action
4398  *   Pointer to action structure.
4399  * @param[in, out] dev_flow
4400  *   Pointer to the mlx5_flow.
4401  * @param[in] attr
4402  *   Pointer to the flow attributes.
4403  * @param[out] error
4404  *   Pointer to the error structure.
4405  *
4406  * @return
4407  *   0 on success, a negative errno value otherwise and rte_errno is set.
4408  */
4409 static int
4410 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4411                                 const struct rte_flow_action *action,
4412                                 struct mlx5_flow *dev_flow,
4413                                 const struct rte_flow_attr *attr,
4414                                 struct rte_flow_error *error)
4415 {
4416         const struct rte_flow_action_raw_encap *encap_data;
4417         struct mlx5_flow_dv_encap_decap_resource res;
4418
4419         memset(&res, 0, sizeof(res));
4420         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4421         res.size = encap_data->size;
4422         memcpy(res.buf, encap_data->data, res.size);
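             /*
              * Encap data smaller than the decision size holds only the
              * L2 header to rewrite after an L3 tunnel decap; larger data
              * describes a full L2-to-L3 tunnel encapsulation.
              */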
4423         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4424                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4425                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4426         if (attr->transfer)
4427                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4428         else
4429                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4430                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4431         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4432                 return rte_flow_error_set(error, EINVAL,
4433                                           RTE_FLOW_ERROR_TYPE_ACTION,
4434                                           NULL, "can't create encap action");
4435         return 0;
4436 }
4437
4438 /**
4439  * Create action push VLAN.
4440  *
4441  * @param[in] dev
4442  *   Pointer to rte_eth_dev structure.
4443  * @param[in] attr
4444  *   Pointer to the flow attributes.
4445  * @param[in] vlan
4446  *   Pointer to the vlan to push to the Ethernet header.
4447  * @param[in, out] dev_flow
4448  *   Pointer to the mlx5_flow.
4449  * @param[out] error
4450  *   Pointer to the error structure.
4451  *
4452  * @return
4453  *   0 on success, a negative errno value otherwise and rte_errno is set.
4454  */
4455 static int
4456 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4457                                 const struct rte_flow_attr *attr,
4458                                 const struct rte_vlan_hdr *vlan,
4459                                 struct mlx5_flow *dev_flow,
4460                                 struct rte_flow_error *error)
4461 {
4462         struct mlx5_flow_dv_push_vlan_action_resource res;
4463
4464         memset(&res, 0, sizeof(res));
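             /*
              * Build the 32-bit VLAN word: TPID (EtherType) in the upper
              * 16 bits, TCI in the lower 16, in network byte order.
              */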
4465         res.vlan_tag =
4466                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4467                                  vlan->vlan_tci);
4468         if (attr->transfer)
4469                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4470         else
4471                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4472                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4473         return flow_dv_push_vlan_action_resource_register
4474                                             (dev, &res, dev_flow, error);
4475 }
4476
4477 /**
4478  * Validate the modify-header actions.
4479  *
4480  * @param[in] action_flags
4481  *   Holds the actions detected until now.
4482  * @param[in] action
4483  *   Pointer to the modify action.
4484  * @param[out] error
4485  *   Pointer to error structure.
4486  *
4487  * @return
4488  *   0 on success, a negative errno value otherwise and rte_errno is set.
4489  */
4490 static int
4491 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4492                                    const struct rte_flow_action *action,
4493                                    struct rte_flow_error *error)
4494 {
4495         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4496                 return rte_flow_error_set(error, EINVAL,
4497                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4498                                           NULL, "action configuration not set");
4499         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4500                 return rte_flow_error_set(error, EINVAL,
4501                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4502                                           "can't have encap action before"
4503                                           " modify action");
4504         return 0;
4505 }
4506
4507 /**
4508  * Validate the modify-header MAC address actions.
4509  *
4510  * @param[in] action_flags
4511  *   Holds the actions detected until now.
4512  * @param[in] action
4513  *   Pointer to the modify action.
4514  * @param[in] item_flags
4515  *   Holds the items detected.
4516  * @param[out] error
4517  *   Pointer to error structure.
4518  *
4519  * @return
4520  *   0 on success, a negative errno value otherwise and rte_errno is set.
4521  */
4522 static int
4523 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4524                                    const struct rte_flow_action *action,
4525                                    const uint64_t item_flags,
4526                                    struct rte_flow_error *error)
4527 {
4528         int ret = 0;
4529
4530         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4531         if (!ret) {
4532                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4533                         return rte_flow_error_set(error, EINVAL,
4534                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4535                                                   NULL,
4536                                                   "no L2 item in pattern");
4537         }
4538         return ret;
4539 }
4540
4541 /**
4542  * Validate the modify-header IPv4 address actions.
4543  *
4544  * @param[in] action_flags
4545  *   Holds the actions detected until now.
4546  * @param[in] action
4547  *   Pointer to the modify action.
4548  * @param[in] item_flags
4549  *   Holds the items detected.
4550  * @param[out] error
4551  *   Pointer to error structure.
4552  *
4553  * @return
4554  *   0 on success, a negative errno value otherwise and rte_errno is set.
4555  */
4556 static int
4557 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4558                                     const struct rte_flow_action *action,
4559                                     const uint64_t item_flags,
4560                                     struct rte_flow_error *error)
4561 {
4562         int ret = 0;
4563         uint64_t layer;
4564
4565         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4566         if (!ret) {
4567                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4568                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4569                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4570                 if (!(item_flags & layer))
4571                         return rte_flow_error_set(error, EINVAL,
4572                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4573                                                   NULL,
4574                                                   "no ipv4 item in pattern");
4575         }
4576         return ret;
4577 }
4578
4579 /**
4580  * Validate the modify-header IPv6 address actions.
4581  *
4582  * @param[in] action_flags
4583  *   Holds the actions detected until now.
4584  * @param[in] action
4585  *   Pointer to the modify action.
4586  * @param[in] item_flags
4587  *   Holds the items detected.
4588  * @param[out] error
4589  *   Pointer to error structure.
4590  *
4591  * @return
4592  *   0 on success, a negative errno value otherwise and rte_errno is set.
4593  */
4594 static int
4595 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4596                                     const struct rte_flow_action *action,
4597                                     const uint64_t item_flags,
4598                                     struct rte_flow_error *error)
4599 {
4600         int ret = 0;
4601         uint64_t layer;
4602
4603         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4604         if (!ret) {
4605                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4606                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4607                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4608                 if (!(item_flags & layer))
4609                         return rte_flow_error_set(error, EINVAL,
4610                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4611                                                   NULL,
4612                                                   "no ipv6 item in pattern");
4613         }
4614         return ret;
4615 }
4616
4617 /**
4618  * Validate the modify-header TP actions.
4619  *
4620  * @param[in] action_flags
4621  *   Holds the actions detected until now.
4622  * @param[in] action
4623  *   Pointer to the modify action.
4624  * @param[in] item_flags
4625  *   Holds the items detected.
4626  * @param[out] error
4627  *   Pointer to error structure.
4628  *
4629  * @return
4630  *   0 on success, a negative errno value otherwise and rte_errno is set.
4631  */
4632 static int
4633 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4634                                   const struct rte_flow_action *action,
4635                                   const uint64_t item_flags,
4636                                   struct rte_flow_error *error)
4637 {
4638         int ret = 0;
4639         uint64_t layer;
4640
4641         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4642         if (!ret) {
4643                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4644                                  MLX5_FLOW_LAYER_INNER_L4 :
4645                                  MLX5_FLOW_LAYER_OUTER_L4;
4646                 if (!(item_flags & layer))
4647                         return rte_flow_error_set(error, EINVAL,
4648                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4649                                                   NULL, "no transport layer "
4650                                                   "in pattern");
4651         }
4652         return ret;
4653 }
4654
4655 /**
4656  * Validate the modify-header actions of increment/decrement
4657  * TCP Sequence-number.
4658  *
4659  * @param[in] action_flags
4660  *   Holds the actions detected until now.
4661  * @param[in] action
4662  *   Pointer to the modify action.
4663  * @param[in] item_flags
4664  *   Holds the items detected.
4665  * @param[out] error
4666  *   Pointer to error structure.
4667  *
4668  * @return
4669  *   0 on success, a negative errno value otherwise and rte_errno is set.
4670  */
4671 static int
4672 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4673                                        const struct rte_flow_action *action,
4674                                        const uint64_t item_flags,
4675                                        struct rte_flow_error *error)
4676 {
4677         int ret = 0;
4678         uint64_t layer;
4679
4680         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4681         if (!ret) {
4682                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4683                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4684                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4685                 if (!(item_flags & layer))
4686                         return rte_flow_error_set(error, EINVAL,
4687                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4688                                                   NULL, "no TCP item in"
4689                                                   " pattern");
4690                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4691                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4692                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4693                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4694                         return rte_flow_error_set(error, EINVAL,
4695                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4696                                                   NULL,
4697                                                   "cannot decrease and increase"
4698                                                   " TCP sequence number"
4699                                                   " at the same time");
4700         }
4701         return ret;
4702 }
4703
4704 /**
4705  * Validate the modify-header actions of increment/decrement
4706  * TCP Acknowledgment number.
4707  *
4708  * @param[in] action_flags
4709  *   Holds the actions detected until now.
4710  * @param[in] action
4711  *   Pointer to the modify action.
4712  * @param[in] item_flags
4713  *   Holds the items detected.
4714  * @param[out] error
4715  *   Pointer to error structure.
4716  *
4717  * @return
4718  *   0 on success, a negative errno value otherwise and rte_errno is set.
4719  */
4720 static int
4721 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4722                                        const struct rte_flow_action *action,
4723                                        const uint64_t item_flags,
4724                                        struct rte_flow_error *error)
4725 {
4726         int ret = 0;
4727         uint64_t layer;
4728
4729         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4730         if (!ret) {
4731                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4732                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4733                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4734                 if (!(item_flags & layer))
4735                         return rte_flow_error_set(error, EINVAL,
4736                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4737                                                   NULL, "no TCP item in"
4738                                                   " pattern");
4739                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4740                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4741                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4742                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4743                         return rte_flow_error_set(error, EINVAL,
4744                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4745                                                   NULL,
4746                                                   "cannot decrease and increase"
4747                                                   " TCP acknowledgment number"
4748                                                   " at the same time");
4749         }
4750         return ret;
4751 }
4752
4753 /**
4754  * Validate the modify-header TTL actions.
4755  *
4756  * @param[in] action_flags
4757  *   Holds the actions detected until now.
4758  * @param[in] action
4759  *   Pointer to the modify action.
4760  * @param[in] item_flags
4761  *   Holds the items detected.
4762  * @param[out] error
4763  *   Pointer to error structure.
4764  *
4765  * @return
4766  *   0 on success, a negative errno value otherwise and rte_errno is set.
4767  */
4768 static int
4769 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4770                                    const struct rte_flow_action *action,
4771                                    const uint64_t item_flags,
4772                                    struct rte_flow_error *error)
4773 {
4774         int ret = 0;
4775         uint64_t layer;
4776
4777         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4778         if (!ret) {
4779                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4780                                  MLX5_FLOW_LAYER_INNER_L3 :
4781                                  MLX5_FLOW_LAYER_OUTER_L3;
4782                 if (!(item_flags & layer))
4783                         return rte_flow_error_set(error, EINVAL,
4784                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4785                                                   NULL,
4786                                                   "no IP protocol in pattern");
4787         }
4788         return ret;
4789 }
4790
4791 /**
4792  * Validate the generic modify field action.
 *
4793  * @param[in] dev
4794  *   Pointer to the rte_eth_dev structure.
4795  * @param[in] action_flags
4796  *   Holds the actions detected until now.
4797  * @param[in] action
4798  *   Pointer to the modify action.
4799  * @param[in] attr
4800  *   Pointer to the flow attributes.
4801  * @param[out] error
4802  *   Pointer to error structure.
4803  *
4804  * @return
4805  *   Number of header fields to modify (0 or more) on success,
4806  *   a negative errno value otherwise and rte_errno is set.
4807  */
4808 static int
4809 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4810                                    const uint64_t action_flags,
4811                                    const struct rte_flow_action *action,
4812                                    const struct rte_flow_attr *attr,
4813                                    struct rte_flow_error *error)
4814 {
4815         int ret = 0;
4816         struct mlx5_priv *priv = dev->data->dev_private;
4817         struct mlx5_dev_config *config = &priv->config;
4818         const struct rte_flow_action_modify_field *action_modify_field =
4819                 action->conf;
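             /* Widths (in bits) of the destination and source fields. */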
4820         uint32_t dst_width = mlx5_flow_item_field_width(config,
4821                                 action_modify_field->dst.field);
4822         uint32_t src_width = mlx5_flow_item_field_width(config,
4823                                 action_modify_field->src.field);
4824
4825         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4826         if (ret)
4827                 return ret;
4828
4829         if (action_modify_field->width == 0)
4830                 return rte_flow_error_set(error, EINVAL,
4831                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4832                                 "no bits are requested to be modified");
4833         else if (action_modify_field->width > dst_width ||
4834                  action_modify_field->width > src_width)
4835                 return rte_flow_error_set(error, EINVAL,
4836                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4837                                 "cannot modify more bits than"
4838                                 " the width of a field");
4839         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4840             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4841                 if ((action_modify_field->dst.offset +
4842                      action_modify_field->width > dst_width) ||
4843                     (action_modify_field->dst.offset % 32))
4844                         return rte_flow_error_set(error, EINVAL,
4845                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4846                                         "destination offset is too big"
4847                                         " or not aligned to 4 bytes");
4848                 if (action_modify_field->dst.level &&
4849                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4850                         return rte_flow_error_set(error, ENOTSUP,
4851                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4852                                         "inner header fields modification"
4853                                         " is not supported");
4854         }
4855         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4856             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4857                 if (!attr->transfer && !attr->group)
4858                         return rte_flow_error_set(error, ENOTSUP,
4859                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4860                                         "modify field action is not"
4861                                         " supported for group 0");
4862                 if ((action_modify_field->src.offset +
4863                      action_modify_field->width > src_width) ||
4864                     (action_modify_field->src.offset % 32))
4865                         return rte_flow_error_set(error, EINVAL,
4866                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4867                                         "source offset is too big"
4868                                         " or not aligned to 4 bytes");
4869                 if (action_modify_field->src.level &&
4870                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4871                         return rte_flow_error_set(error, ENOTSUP,
4872                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4873                                         "inner header fields modification"
4874                                         " is not supported");
4875         }
4876         if ((action_modify_field->dst.field ==
4877              action_modify_field->src.field) &&
4878             (action_modify_field->dst.level ==
4879              action_modify_field->src.level))
4880                 return rte_flow_error_set(error, EINVAL,
4881                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4882                                 "source and destination fields"
4883                                 " cannot be the same");
4884         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4885             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4886                 return rte_flow_error_set(error, EINVAL,
4887                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4888                                 "immediate value or a pointer to it"
4889                                 " cannot be used as a destination");
4890         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4891             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4892                 return rte_flow_error_set(error, ENOTSUP,
4893                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4894                                 "modification of an arbitrary"
4895                                 " place in a packet is not supported");
4896         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4897             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4898                 return rte_flow_error_set(error, ENOTSUP,
4899                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4900                                 "modification of the 802.1Q Tag"
4901                                 " Identifier is not supported");
4902         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4903             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4904                 return rte_flow_error_set(error, ENOTSUP,
4905                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4906                                 "modification of the VXLAN Network"
4907                                 " Identifier is not supported");
4908         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4909             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4910                 return rte_flow_error_set(error, ENOTSUP,
4911                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4912                                 "modification of the GENEVE Network"
4913                                 " Identifier is not supported");
4914         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4915             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4916             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4917             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4918                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4919                     !mlx5_flow_ext_mreg_supported(dev))
4920                         return rte_flow_error_set(error, ENOTSUP,
4921                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4922                                         "cannot modify mark or metadata without"
4923                                         " extended metadata register support");
4924         }
4925         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4926                 return rte_flow_error_set(error, ENOTSUP,
4927                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4928                                 "add and sub operations"
4929                                 " are not supported");
4930         return (action_modify_field->width / 32) +
4931                !!(action_modify_field->width % 32);
4932 }
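
/*
 * Illustrative arithmetic for the return value above (not part of
 * the driver): modifying a 48-bit MAC address takes
 * 48 / 32 + !!(48 % 32) = 2 modification commands, since the
 * modify-header actions operate on 32-bit words.
 */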
4933
4934 /**
4935  * Validate jump action.
4936  *
4937  * @param[in] action
4938  *   Pointer to the jump action.
4939  * @param[in] action_flags
4940  *   Holds the actions detected until now.
4941  * @param[in] attributes
4942  *   Pointer to flow attributes.
4943  * @param[in] external
4944  *   Action belongs to a flow rule created by a request external to the PMD.
4945  * @param[out] error
4946  *   Pointer to error structure.
4947  *
4948  * @return
4949  *   0 on success, a negative errno value otherwise and rte_errno is set.
4950  */
4951 static int
4952 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4953                              const struct mlx5_flow_tunnel *tunnel,
4954                              const struct rte_flow_action *action,
4955                              uint64_t action_flags,
4956                              const struct rte_flow_attr *attributes,
4957                              bool external, struct rte_flow_error *error)
4958 {
4959         uint32_t target_group, table;
4960         int ret = 0;
4961         struct flow_grp_info grp_info = {
4962                 .external = !!external,
4963                 .transfer = !!attributes->transfer,
4964                 .fdb_def_rule = 1,
4965                 .std_tbl_fix = 0
4966         };
4967         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4968                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4969                 return rte_flow_error_set(error, EINVAL,
4970                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4971                                           "can't have 2 fate actions in"
4972                                           " the same flow");
4973         if (!action->conf)
4974                 return rte_flow_error_set(error, EINVAL,
4975                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4976                                           NULL, "action configuration not set");
4977         target_group =
4978                 ((const struct rte_flow_action_jump *)action->conf)->group;
4979         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4980                                        &grp_info, error);
4981         if (ret)
4982                 return ret;
4983         if (attributes->group == target_group &&
4984             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4985                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4986                 return rte_flow_error_set(error, EINVAL,
4987                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4988                                           "target group must be other than"
4989                                           " the current flow group");
4990         return 0;
4991 }
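
/*
 * A minimal rte_flow usage sketch covered by the validation above
 * (illustrative only; the group number is hypothetical):
 *
 *   struct rte_flow_action_jump jump = { .group = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 * Jumping to the rule's own group is rejected unless a tunnel
 * set/match action remaps the target group.
 */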
4992
4993 /**
4994  * Validate the port_id action.
4995  *
4996  * @param[in] dev
4997  *   Pointer to rte_eth_dev structure.
4998  * @param[in] action_flags
4999  *   Bit-fields that holds the actions detected until now.
5000  * @param[in] action
5001  *   Port_id RTE action structure.
5002  * @param[in] attr
5003  *   Attributes of flow that includes this action.
5004  * @param[out] error
5005  *   Pointer to error structure.
5006  *
5007  * @return
5008  *   0 on success, a negative errno value otherwise and rte_errno is set.
5009  */
5010 static int
5011 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5012                                 uint64_t action_flags,
5013                                 const struct rte_flow_action *action,
5014                                 const struct rte_flow_attr *attr,
5015                                 struct rte_flow_error *error)
5016 {
5017         const struct rte_flow_action_port_id *port_id;
5018         struct mlx5_priv *act_priv;
5019         struct mlx5_priv *dev_priv;
5020         uint16_t port;
5021
5022         if (!attr->transfer)
5023                 return rte_flow_error_set(error, ENOTSUP,
5024                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5025                                           NULL,
5026                                           "port id action is valid in transfer"
5027                                           " mode only");
5028         if (!action || !action->conf)
5029                 return rte_flow_error_set(error, ENOTSUP,
5030                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5031                                           NULL,
5032                                           "port id action parameters must be"
5033                                           " specified");
5034         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5035                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5036                 return rte_flow_error_set(error, EINVAL,
5037                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5038                                           "can have only one fate action in"
5039                                           " a flow");
5040         dev_priv = mlx5_dev_to_eswitch_info(dev);
5041         if (!dev_priv)
5042                 return rte_flow_error_set(error, rte_errno,
5043                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5044                                           NULL,
5045                                           "failed to obtain E-Switch info");
5046         port_id = action->conf;
5047         port = port_id->original ? dev->data->port_id : port_id->id;
5048         act_priv = mlx5_port_to_eswitch_info(port, false);
5049         if (!act_priv)
5050                 return rte_flow_error_set
5051                                 (error, rte_errno,
5052                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
5053                                  "failed to obtain E-Switch port id for port");
5054         if (act_priv->domain_id != dev_priv->domain_id)
5055                 return rte_flow_error_set
5056                                 (error, EINVAL,
5057                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5058                                  "port does not belong to"
5059                                  " E-Switch being configured");
5060         return 0;
5061 }
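
/*
 * Note: both the configuring device and the action's target port must
 * resolve to E-Switch contexts sharing one domain_id, i.e. belong to
 * the same switch domain; cross-domain forwarding is rejected above.
 */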
5062
5063 /**
5064  * Get the maximum number of modify header actions.
5065  *
5066  * @param dev
5067  *   Pointer to rte_eth_dev structure.
5068  * @param root
5069  *   Whether action is on root table.
5070  *
5071  * @return
5072  *   Max number of modify header actions device can support.
5073  */
5074 static inline unsigned int
5075 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5076                               bool root)
5077 {
5078         /*
5079          * There's no way to directly query the max capacity from FW.
5080          * The maximal value on root table should be assumed to be supported.
5081          */
5082         if (!root)
5083                 return MLX5_MAX_MODIFY_NUM;
5084         else
5085                 return MLX5_ROOT_TBL_MODIFY_NUM;
5086 }
5087
5088 /**
5089  * Validate the meter action.
5090  *
5091  * @param[in] dev
5092  *   Pointer to rte_eth_dev structure.
5093  * @param[in] action_flags
5094  *   Bit-fields that holds the actions detected until now.
5095  * @param[in] action
5096  *   Pointer to the meter action.
5097  * @param[in] attr
5098  *   Attributes of flow that includes this action.
5099  * @param[in] port_id_item
5100  *   Pointer to item indicating port id.
5101  * @param[out] error
5102  *   Pointer to error structure.
5103  *
5104  * @return
5105  *   0 on success, a negative errno value otherwise and rte_errno is set.
5106  */
5107 static int
5108 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5109                                 uint64_t action_flags,
5110                                 const struct rte_flow_action *action,
5111                                 const struct rte_flow_attr *attr,
5112                                 const struct rte_flow_item *port_id_item,
5113                                 bool *def_policy,
5114                                 struct rte_flow_error *error)
5115 {
5116         struct mlx5_priv *priv = dev->data->dev_private;
5117         const struct rte_flow_action_meter *am = action->conf;
5118         struct mlx5_flow_meter_info *fm;
5119         struct mlx5_flow_meter_policy *mtr_policy;
5120         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5121
5122         if (!am)
5123                 return rte_flow_error_set(error, EINVAL,
5124                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5125                                           "meter action conf is NULL");
5126
5127         if (action_flags & MLX5_FLOW_ACTION_METER)
5128                 return rte_flow_error_set(error, ENOTSUP,
5129                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5130                                           "meter chaining not supported");
5131         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5132                 return rte_flow_error_set(error, ENOTSUP,
5133                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5134                                           "meter with jump not supported");
5135         if (!priv->mtr_en)
5136                 return rte_flow_error_set(error, ENOTSUP,
5137                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5138                                           NULL,
5139                                           "meter action not supported");
5140         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5141         if (!fm)
5142                 return rte_flow_error_set(error, EINVAL,
5143                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5144                                           "Meter not found");
5145         /* ASO meter can always be shared by different domains. */
5146         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5147             !(fm->transfer == attr->transfer ||
5148               (!fm->ingress && !attr->ingress && attr->egress) ||
5149               (!fm->egress && !attr->egress && attr->ingress)))
5150                 return rte_flow_error_set(error, EINVAL,
5151                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5152                         "Flow attributes domain is either invalid "
5153                         "or has a domain conflict with the current "
5154                         "meter attributes");
5155         if (fm->def_policy) {
5156                 if (!((attr->transfer &&
5157                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5158                         (attr->egress &&
5159                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5160                         (attr->ingress &&
5161                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5162                         return rte_flow_error_set(error, EINVAL,
5163                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5164                                           "Flow attributes domain "
5165                                           "has a conflict with the current "
5166                                           "meter domain attributes");
5167                 *def_policy = true;
5168         } else {
5169                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5170                                                 fm->policy_id, NULL);
5171                 if (!mtr_policy)
5172                         return rte_flow_error_set(error, EINVAL,
5173                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5174                                           "Invalid policy id for meter");
5175                 if (!((attr->transfer && mtr_policy->transfer) ||
5176                         (attr->egress && mtr_policy->egress) ||
5177                         (attr->ingress && mtr_policy->ingress)))
5178                         return rte_flow_error_set(error, EINVAL,
5179                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5180                                           "Flow attributes domain "
5181                                           "has a conflict with the current "
5182                                           "meter domain attributes");
5183                 if (attr->transfer && mtr_policy->dev) {
5184                         /*
5185                          * When the policy has a fate action of port_id,
5186                          * the flow should have the same src port as the policy.
5187                          */
5188                         struct mlx5_priv *policy_port_priv =
5189                                         mtr_policy->dev->data->dev_private;
5190                         int32_t flow_src_port = priv->representor_id;
5191
5192                         if (port_id_item) {
5193                                 const struct rte_flow_item_port_id *spec =
5194                                                         port_id_item->spec;
5195                                 struct mlx5_priv *port_priv =
5196                                         mlx5_port_to_eswitch_info(spec->id,
5197                                                                   false);
5198                                 if (!port_priv)
5199                                         return rte_flow_error_set(error,
5200                                                 rte_errno,
5201                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5202                                                 spec,
5203                                                 "Failed to get port info.");
5204                                 flow_src_port = port_priv->representor_id;
5205                         }
5206                         if (flow_src_port != policy_port_priv->representor_id)
5207                                 return rte_flow_error_set(error,
5208                                                 rte_errno,
5209                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5210                                                 NULL,
5211                                                 "Flow and meter policy "
5212                                                 "have different src ports.");
5213                 }
5214                 *def_policy = false;
5215         }
5216         return 0;
5217 }
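
/*
 * On successful return *def_policy reflects whether the meter uses
 * the global default policy tables or a user-created policy, which
 * selects the rule-building path taken by the caller.
 */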
5218
5219 /**
5220  * Validate the age action.
5221  *
5222  * @param[in] action_flags
5223  *   Holds the actions detected until now.
5224  * @param[in] action
5225  *   Pointer to the age action.
5226  * @param[in] dev
5227  *   Pointer to the Ethernet device structure.
5228  * @param[out] error
5229  *   Pointer to error structure.
5230  *
5231  * @return
5232  *   0 on success, a negative errno value otherwise and rte_errno is set.
5233  */
5234 static int
5235 flow_dv_validate_action_age(uint64_t action_flags,
5236                             const struct rte_flow_action *action,
5237                             struct rte_eth_dev *dev,
5238                             struct rte_flow_error *error)
5239 {
5240         struct mlx5_priv *priv = dev->data->dev_private;
5241         const struct rte_flow_action_age *age = action->conf;
5242
5243         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5244             !priv->sh->aso_age_mng))
5245                 return rte_flow_error_set(error, ENOTSUP,
5246                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5247                                           NULL,
5248                                           "age action not supported");
5249         if (!(action->conf))
5250                 return rte_flow_error_set(error, EINVAL,
5251                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5252                                           "configuration cannot be null");
5253         if (!(age->timeout))
5254                 return rte_flow_error_set(error, EINVAL,
5255                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5256                                           "invalid timeout value 0");
5257         if (action_flags & MLX5_FLOW_ACTION_AGE)
5258                 return rte_flow_error_set(error, EINVAL,
5259                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5260                                           "duplicate age actions set");
5261         return 0;
5262 }
5263
5264 /**
5265  * Validate the modify-header IPv4 DSCP actions.
5266  *
5267  * @param[in] action_flags
5268  *   Holds the actions detected until now.
5269  * @param[in] action
5270  *   Pointer to the modify action.
5271  * @param[in] item_flags
5272  *   Holds the items detected.
5273  * @param[out] error
5274  *   Pointer to error structure.
5275  *
5276  * @return
5277  *   0 on success, a negative errno value otherwise and rte_errno is set.
5278  */
5279 static int
5280 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5281                                          const struct rte_flow_action *action,
5282                                          const uint64_t item_flags,
5283                                          struct rte_flow_error *error)
5284 {
5285         int ret = 0;
5286
5287         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5288         if (!ret) {
5289                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5290                         return rte_flow_error_set(error, EINVAL,
5291                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5292                                                   NULL,
5293                                                   "no ipv4 item in pattern");
5294         }
5295         return ret;
5296 }
5297
5298 /**
5299  * Validate the modify-header IPv6 DSCP actions.
5300  *
5301  * @param[in] action_flags
5302  *   Holds the actions detected until now.
5303  * @param[in] action
5304  *   Pointer to the modify action.
5305  * @param[in] item_flags
5306  *   Holds the items detected.
5307  * @param[out] error
5308  *   Pointer to error structure.
5309  *
5310  * @return
5311  *   0 on success, a negative errno value otherwise and rte_errno is set.
5312  */
5313 static int
5314 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5315                                          const struct rte_flow_action *action,
5316                                          const uint64_t item_flags,
5317                                          struct rte_flow_error *error)
5318 {
5319         int ret = 0;
5320
5321         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5322         if (!ret) {
5323                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5324                         return rte_flow_error_set(error, EINVAL,
5325                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5326                                                   NULL,
5327                                                   "no ipv6 item in pattern");
5328         }
5329         return ret;
5330 }
5331
5332 int
5333 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5334                         struct mlx5_list_entry *entry, void *cb_ctx)
5335 {
5336         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5337         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5338         struct mlx5_flow_dv_modify_hdr_resource *resource =
5339                                   container_of(entry, typeof(*resource), entry);
5340         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5341
5342         key_len += ref->actions_num * sizeof(ref->actions[0]);
5343         return ref->actions_num != resource->actions_num ||
5344                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5345 }
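
/*
 * The match key above is the tail of the resource starting at ft_type
 * plus the variable-length actions array, so two entries are equal
 * only when the table type, flags and every modification command
 * match exactly.
 */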
5346
5347 static struct mlx5_indexed_pool *
5348 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5349 {
5350         struct mlx5_indexed_pool *ipool = __atomic_load_n
5351                                      (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5352
5353         if (!ipool) {
5354                 struct mlx5_indexed_pool *expected = NULL;
5355                 struct mlx5_indexed_pool_config cfg =
5356                     (struct mlx5_indexed_pool_config) {
5357                        .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5358                                                                    (index + 1) *
5359                                            sizeof(struct mlx5_modification_cmd),
5360                        .trunk_size = 64,
5361                        .grow_trunk = 3,
5362                        .grow_shift = 2,
5363                        .need_lock = 1,
5364                        .release_mem_en = 1,
5365                        .malloc = mlx5_malloc,
5366                        .free = mlx5_free,
5367                        .type = "mlx5_modify_action_resource",
5368                 };
5369
5370                 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5371                 ipool = mlx5_ipool_create(&cfg);
5372                 if (!ipool)
5373                         return NULL;
5374                 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5375                                                  &expected, ipool, false,
5376                                                  __ATOMIC_SEQ_CST,
5377                                                  __ATOMIC_SEQ_CST)) {
5378                         mlx5_ipool_destroy(ipool);
5379                         ipool = __atomic_load_n(&sh->mdh_ipools[index],
5380                                                 __ATOMIC_SEQ_CST);
5381                 }
5382         }
5383         return ipool;
5384 }
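
/*
 * The pattern above creates the per-size ipool lazily and publishes it
 * with a single compare-and-swap: if another thread wins the race, the
 * local pool is destroyed and the published one is reloaded, so all
 * threads converge on one ipool per actions_num index.
 */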
5385
5386 struct mlx5_list_entry *
5387 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5388 {
5389         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5390         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5391         struct mlx5dv_dr_domain *ns;
5392         struct mlx5_flow_dv_modify_hdr_resource *entry;
5393         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5394         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5395                                                           ref->actions_num - 1);
5396         int ret;
5397         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5398         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5399         uint32_t idx;
5400
5401         if (unlikely(!ipool)) {
5402                 rte_flow_error_set(ctx->error, ENOMEM,
5403                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5404                                    NULL, "cannot allocate modify ipool");
5405                 return NULL;
5406         }
5407         entry = mlx5_ipool_zmalloc(ipool, &idx);
5408         if (!entry) {
5409                 rte_flow_error_set(ctx->error, ENOMEM,
5410                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5411                                    "cannot allocate resource memory");
5412                 return NULL;
5413         }
5414         rte_memcpy(&entry->ft_type,
5415                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5416                    key_len + data_len);
5417         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5418                 ns = sh->fdb_domain;
5419         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5420                 ns = sh->tx_domain;
5421         else
5422                 ns = sh->rx_domain;
5423         ret = mlx5_flow_os_create_flow_action_modify_header
5424                                         (sh->ctx, ns, entry,
5425                                          data_len, &entry->action);
5426         if (ret) {
5427                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5428                 rte_flow_error_set(ctx->error, ENOMEM,
5429                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5430                                    NULL, "cannot create modification action");
5431                 return NULL;
5432         }
5433         entry->idx = idx;
5434         return &entry->entry;
5435 }
5436
5437 struct mlx5_list_entry *
5438 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5439                         void *cb_ctx)
5440 {
5441         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5442         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5443         struct mlx5_flow_dv_modify_hdr_resource *entry;
5444         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5445         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5446         uint32_t idx;
5447
5448         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5449                                   &idx);
5450         if (!entry) {
5451                 rte_flow_error_set(ctx->error, ENOMEM,
5452                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5453                                    "cannot allocate resource memory");
5454                 return NULL;
5455         }
5456         memcpy(entry, oentry, sizeof(*entry) + data_len);
5457         entry->idx = idx;
5458         return &entry->entry;
5459 }
5460
5461 void
5462 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5463 {
5464         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5465         struct mlx5_flow_dv_modify_hdr_resource *res =
5466                 container_of(entry, typeof(*res), entry);
5467
5468         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5469 }
5470
5471 /**
5472  * Validate the sample action.
5473  *
5474  * @param[in, out] action_flags
5475  *   Holds the actions detected until now.
5476  * @param[in] action
5477  *   Pointer to the sample action.
5478  * @param[in] dev
5479  *   Pointer to the Ethernet device structure.
5480  * @param[in] attr
5481  *   Attributes of flow that includes this action.
5482  * @param[in] item_flags
5483  *   Holds the items detected.
5484  * @param[in] rss
5485  *   Pointer to the RSS action.
5486  * @param[out] sample_rss
5487  *   Pointer to the RSS action in sample action list.
5488  * @param[out] count
5489  *   Pointer to the COUNT action in sample action list.
5490  * @param[out] fdb_mirror_limit
5491  *   Pointer to the FDB mirror limitation flag.
5492  * @param[out] error
5493  *   Pointer to error structure.
5494  *
5495  * @return
5496  *   0 on success, a negative errno value otherwise and rte_errno is set.
5497  */
5498 static int
5499 flow_dv_validate_action_sample(uint64_t *action_flags,
5500                                const struct rte_flow_action *action,
5501                                struct rte_eth_dev *dev,
5502                                const struct rte_flow_attr *attr,
5503                                uint64_t item_flags,
5504                                const struct rte_flow_action_rss *rss,
5505                                const struct rte_flow_action_rss **sample_rss,
5506                                const struct rte_flow_action_count **count,
5507                                int *fdb_mirror_limit,
5508                                struct rte_flow_error *error)
5509 {
5510         struct mlx5_priv *priv = dev->data->dev_private;
5511         struct mlx5_dev_config *dev_conf = &priv->config;
5512         const struct rte_flow_action_sample *sample = action->conf;
5513         const struct rte_flow_action *act;
5514         uint64_t sub_action_flags = 0;
5515         uint16_t queue_index = 0xFFFF;
5516         int actions_n = 0;
5517         int ret;
5518
5519         if (!sample)
5520                 return rte_flow_error_set(error, EINVAL,
5521                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5522                                           "configuration cannot be NULL");
5523         if (sample->ratio == 0)
5524                 return rte_flow_error_set(error, EINVAL,
5525                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5526                                           "ratio value starts from 1");
5527         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5528                 return rte_flow_error_set(error, ENOTSUP,
5529                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5530                                           NULL,
5531                                           "sample action not supported");
5532         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5533                 return rte_flow_error_set(error, EINVAL,
5534                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5535                                           "Multiple sample actions not "
5536                                           "supported");
5537         if (*action_flags & MLX5_FLOW_ACTION_METER)
5538                 return rte_flow_error_set(error, EINVAL,
5539                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5540                                           "wrong action order, meter should "
5541                                           "be after sample action");
5542         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5543                 return rte_flow_error_set(error, EINVAL,
5544                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5545                                           "wrong action order, jump should "
5546                                           "be after sample action");
5547         act = sample->actions;
5548         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5549                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5550                         return rte_flow_error_set(error, ENOTSUP,
5551                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5552                                                   act, "too many actions");
5553                 switch (act->type) {
5554                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5555                         ret = mlx5_flow_validate_action_queue(act,
5556                                                               sub_action_flags,
5557                                                               dev,
5558                                                               attr, error);
5559                         if (ret < 0)
5560                                 return ret;
5561                         queue_index = ((const struct rte_flow_action_queue *)
5562                                                         (act->conf))->index;
5563                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5564                         ++actions_n;
5565                         break;
5566                 case RTE_FLOW_ACTION_TYPE_RSS:
5567                         *sample_rss = act->conf;
5568                         ret = mlx5_flow_validate_action_rss(act,
5569                                                             sub_action_flags,
5570                                                             dev, attr,
5571                                                             item_flags,
5572                                                             error);
5573                         if (ret < 0)
5574                                 return ret;
5575                         if (rss && *sample_rss &&
5576                             ((*sample_rss)->level != rss->level ||
5577                             (*sample_rss)->types != rss->types))
5578                                 return rte_flow_error_set(error, ENOTSUP,
5579                                         RTE_FLOW_ERROR_TYPE_ACTION,
5580                                         NULL,
5581                                         "Can't use different RSS types "
5582                                         "or levels in the same flow");
5583                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5584                                 queue_index = (*sample_rss)->queue[0];
5585                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5586                         ++actions_n;
5587                         break;
5588                 case RTE_FLOW_ACTION_TYPE_MARK:
5589                         ret = flow_dv_validate_action_mark(dev, act,
5590                                                            sub_action_flags,
5591                                                            attr, error);
5592                         if (ret < 0)
5593                                 return ret;
5594                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5595                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5596                                                 MLX5_FLOW_ACTION_MARK_EXT;
5597                         else
5598                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5599                         ++actions_n;
5600                         break;
5601                 case RTE_FLOW_ACTION_TYPE_COUNT:
5602                         ret = flow_dv_validate_action_count
5603                                 (dev, is_shared_action_count(act),
5604                                  *action_flags | sub_action_flags,
5605                                  error);
5606                         if (ret < 0)
5607                                 return ret;
5608                         *count = act->conf;
5609                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5610                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5611                         ++actions_n;
5612                         break;
5613                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5614                         ret = flow_dv_validate_action_port_id(dev,
5615                                                               sub_action_flags,
5616                                                               act,
5617                                                               attr,
5618                                                               error);
5619                         if (ret)
5620                                 return ret;
5621                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5622                         ++actions_n;
5623                         break;
5624                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5625                         ret = flow_dv_validate_action_raw_encap_decap
5626                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5627                                  &actions_n, action, item_flags, error);
5628                         if (ret < 0)
5629                                 return ret;
5630                         ++actions_n;
5631                         break;
5632                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5633                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5634                         ret = flow_dv_validate_action_l2_encap(dev,
5635                                                                sub_action_flags,
5636                                                                act, attr,
5637                                                                error);
5638                         if (ret < 0)
5639                                 return ret;
5640                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5641                         ++actions_n;
5642                         break;
5643                 default:
5644                         return rte_flow_error_set(error, ENOTSUP,
5645                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5646                                                   NULL,
5647                                                   "optional action not "
5648                                                   "supported");
5649                 }
5650         }
5651         if (attr->ingress && !attr->transfer) {
5652                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5653                                           MLX5_FLOW_ACTION_RSS)))
5654                         return rte_flow_error_set(error, EINVAL,
5655                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5656                                                   NULL,
5657                                                   "Ingress must have a dest "
5658                                                   "QUEUE for Sample");
5659         } else if (attr->egress && !attr->transfer) {
5660                 return rte_flow_error_set(error, ENOTSUP,
5661                                           RTE_FLOW_ERROR_TYPE_ACTION,
5662                                           NULL,
5663                                           "Sample only supports Ingress "
5664                                           "or E-Switch");
5665         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5666                 MLX5_ASSERT(attr->transfer);
5667                 if (sample->ratio > 1)
5668                         return rte_flow_error_set(error, ENOTSUP,
5669                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5670                                                   NULL,
5671                                                   "E-Switch doesn't support "
5672                                                   "any optional action "
5673                                                   "for sampling");
5674                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5675                         return rte_flow_error_set(error, ENOTSUP,
5676                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5677                                                   NULL,
5678                                                   "unsupported action QUEUE");
5679                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5680                         return rte_flow_error_set(error, ENOTSUP,
5681                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5682                                                   NULL,
5683                                                   "unsupported action RSS");
5684                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5685                         return rte_flow_error_set(error, EINVAL,
5686                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5687                                                   NULL,
5688                                                   "E-Switch must have a dest "
5689                                                   "port for mirroring");
5690                 if (!priv->config.hca_attr.reg_c_preserve &&
5691                      priv->representor_id != UINT16_MAX)
5692                         *fdb_mirror_limit = 1;
5693         }
5694         /* Continue validation for Xcap actions. */
5695         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5696             (queue_index == 0xFFFF ||
5697              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5698                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5699                      MLX5_FLOW_XCAP_ACTIONS)
5700                         return rte_flow_error_set(error, ENOTSUP,
5701                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5702                                                   NULL, "encap and decap "
5703                                                   "combination isn't "
5704                                                   "supported");
5705                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5706                                                         MLX5_FLOW_ACTION_ENCAP))
5707                         return rte_flow_error_set(error, ENOTSUP,
5708                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5709                                                   NULL, "encap is not supported"
5710                                                   " for ingress traffic");
5711         }
5712         return 0;
5713 }
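
/*
 * When the FW lacks the reg_c_preserve capability on a representor
 * port, *fdb_mirror_limit is raised so the caller can reject action
 * combinations the FDB mirroring path cannot support.
 */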
5714
5715 /**
5716  * Find existing modify-header resource or create and register a new one.
5717  *
5718  * @param[in, out] dev
5719  *   Pointer to rte_eth_dev structure.
5720  * @param[in, out] resource
5721  *   Pointer to modify-header resource.
5722  * @param[in, out] dev_flow
5723  *   Pointer to the dev_flow.
5724  * @param[out] error
5725  *   Pointer to error structure.
5726  *
5727  * @return
5728  *   0 on success, otherwise a negative errno value and rte_errno is set.
5729  */
5730 static int
5731 flow_dv_modify_hdr_resource_register
5732                         (struct rte_eth_dev *dev,
5733                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5734                          struct mlx5_flow *dev_flow,
5735                          struct rte_flow_error *error)
5736 {
5737         struct mlx5_priv *priv = dev->data->dev_private;
5738         struct mlx5_dev_ctx_shared *sh = priv->sh;
5739         uint32_t key_len = sizeof(*resource) -
5740                            offsetof(typeof(*resource), ft_type) +
5741                            resource->actions_num * sizeof(resource->actions[0]);
5742         struct mlx5_list_entry *entry;
5743         struct mlx5_flow_cb_ctx ctx = {
5744                 .error = error,
5745                 .data = resource,
5746         };
5747         uint64_t key64;
5748
5749         resource->root = !dev_flow->dv.group;
5750         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5751                                                                 resource->root))
5752                 return rte_flow_error_set(error, EOVERFLOW,
5753                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5754                                           "too many modify header items");
5755         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5756         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5757         if (!entry)
5758                 return -rte_errno;
5759         resource = container_of(entry, typeof(*resource), entry);
5760         dev_flow->handle->dvh.modify_hdr = resource;
5761         return 0;
5762 }
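
/*
 * Registration sketch: the key starting at ft_type is folded into a
 * checksum used as the hash-list key, and mlx5_hlist_register() either
 * returns an existing reference-counted entry or invokes
 * flow_dv_modify_create_cb() above to allocate a new one from the
 * matching ipool.
 */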
5763
5764 /**
5765  * Get DV flow counter by index.
5766  *
5767  * @param[in] dev
5768  *   Pointer to the Ethernet device structure.
5769  * @param[in] idx
5770  *   mlx5 flow counter index in the container.
5771  * @param[out] ppool
5772  *   mlx5 flow counter pool in the container.
5773  *
5774  * @return
5775  *   Pointer to the counter, NULL otherwise.
5776  */
5777 static struct mlx5_flow_counter *
5778 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5779                            uint32_t idx,
5780                            struct mlx5_flow_counter_pool **ppool)
5781 {
5782         struct mlx5_priv *priv = dev->data->dev_private;
5783         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5784         struct mlx5_flow_counter_pool *pool;
5785
5786         /* Decrease to original index and clear shared bit. */
5787         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5788         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5789         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5790         MLX5_ASSERT(pool);
5791         if (ppool)
5792                 *ppool = pool;
5793         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5794 }
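
/*
 * Illustrative index math (assuming MLX5_COUNTERS_PER_POOL == 512):
 * external index 1025 becomes 1024 after the decrement and shared-bit
 * clearing, selecting pool 1024 / 512 = 2 at offset 1024 % 512 = 0.
 */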
5795
5796 /**
5797  * Check the devx counter belongs to the pool.
5798  *
5799  * @param[in] pool
5800  *   Pointer to the counter pool.
5801  * @param[in] id
5802  *   The counter devx ID.
5803  *
5804  * @return
5805  *   True if counter belongs to the pool, false otherwise.
5806  */
5807 static bool
5808 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5809 {
5810         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5811                    MLX5_COUNTERS_PER_POOL;
5812
5813         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5814                 return true;
5815         return false;
5816 }
5817
5818 /**
5819  * Get a pool by devx counter ID.
5820  *
5821  * @param[in] cmng
5822  *   Pointer to the counter management.
5823  * @param[in] id
5824  *   The counter devx ID.
5825  *
5826  * @return
5827  *   The counter pool pointer if it exists, NULL otherwise.
5828  */
5829 static struct mlx5_flow_counter_pool *
5830 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5831 {
5832         uint32_t i;
5833         struct mlx5_flow_counter_pool *pool = NULL;
5834
5835         rte_spinlock_lock(&cmng->pool_update_sl);
5836         /* Check last used pool. */
5837         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5838             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5839                 pool = cmng->pools[cmng->last_pool_idx];
5840                 goto out;
5841         }
5842         /* ID out of range means no suitable pool in the container. */
5843         if (id > cmng->max_id || id < cmng->min_id)
5844                 goto out;
5845         /*
5846          * Find the pool from the end of the container, since counter IDs
5847          * are mostly sequentially increasing, and the last pool is likely
5848          * the needed one.
5849          */
5850         i = cmng->n_valid;
5851         while (i--) {
5852                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5853
5854                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5855                         pool = pool_tmp;
5856                         break;
5857                 }
5858         }
5859 out:
5860         rte_spinlock_unlock(&cmng->pool_update_sl);
5861         return pool;
5862 }
5863
5864 /**
5865  * Resize a counter container.
5866  *
5867  * @param[in] dev
5868  *   Pointer to the Ethernet device structure.
5869  *
5870  * @return
5871  *   0 on success, otherwise negative errno value and rte_errno is set.
5872  */
5873 static int
5874 flow_dv_container_resize(struct rte_eth_dev *dev)
5875 {
5876         struct mlx5_priv *priv = dev->data->dev_private;
5877         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5878         void *old_pools = cmng->pools;
5879         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5880         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5881         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5882
5883         if (!pools) {
5884                 rte_errno = ENOMEM;
5885                 return -ENOMEM;
5886         }
5887         if (old_pools)
5888                 memcpy(pools, old_pools, cmng->n *
5889                                        sizeof(struct mlx5_flow_counter_pool *));
5890         cmng->n = resize;
5891         cmng->pools = pools;
5892         if (old_pools)
5893                 mlx5_free(old_pools);
5894         return 0;
5895 }
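
/*
 * The container grows in MLX5_CNT_CONTAINER_RESIZE pointer steps;
 * only the pool-pointer array is reallocated and copied, the pools
 * themselves stay in place.
 */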
5896
5897 /**
5898  * Query a devx flow counter.
5899  *
5900  * @param[in] dev
5901  *   Pointer to the Ethernet device structure.
5902  * @param[in] counter
5903  *   Index to the flow counter.
5904  * @param[out] pkts
5905  *   The statistics value of packets.
5906  * @param[out] bytes
5907  *   The statistics value of bytes.
5908  *
5909  * @return
5910  *   0 on success, otherwise a negative errno value and rte_errno is set.
5911  */
5912 static inline int
5913 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5914                      uint64_t *bytes)
5915 {
5916         struct mlx5_priv *priv = dev->data->dev_private;
5917         struct mlx5_flow_counter_pool *pool = NULL;
5918         struct mlx5_flow_counter *cnt;
5919         int offset;
5920
5921         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5922         MLX5_ASSERT(pool);
5923         if (priv->sh->cmng.counter_fallback)
5924                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5925                                         0, pkts, bytes, 0, NULL, NULL, 0);
5926         rte_spinlock_lock(&pool->sl);
5927         if (!pool->raw) {
5928                 *pkts = 0;
5929                 *bytes = 0;
5930         } else {
5931                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5932                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5933                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5934         }
5935         rte_spinlock_unlock(&pool->sl);
5936         return 0;
5937 }
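
/*
 * Two query paths above: in fallback mode each counter owns a devx
 * object queried directly by a FW command; otherwise statistics are
 * read from the pool raw buffer that the asynchronous counter query
 * service fills in, so a NULL raw simply reports zeros.
 */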
5938
5939 /**
5940  * Create and initialize a new counter pool.
5941  *
5942  * @param[in] dev
5943  *   Pointer to the Ethernet device structure.
5944  * @param[out] dcs
5945  *   The devX counter handle.
5946  * @param[in] age
5947  *   Whether the pool is for counters that were allocated for aging.
5950  *
5951  * @return
5952  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5953  */
5954 static struct mlx5_flow_counter_pool *
5955 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5956                     uint32_t age)
5957 {
5958         struct mlx5_priv *priv = dev->data->dev_private;
5959         struct mlx5_flow_counter_pool *pool;
5960         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5961         bool fallback = priv->sh->cmng.counter_fallback;
5962         uint32_t size = sizeof(*pool);
5963
5964         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5965         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5966         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5967         if (!pool) {
5968                 rte_errno = ENOMEM;
5969                 return NULL;
5970         }
5971         pool->raw = NULL;
5972         pool->is_aged = !!age;
5973         pool->query_gen = 0;
5974         pool->min_dcs = dcs;
5975         rte_spinlock_init(&pool->sl);
5976         rte_spinlock_init(&pool->csl);
5977         TAILQ_INIT(&pool->counters[0]);
5978         TAILQ_INIT(&pool->counters[1]);
5979         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5980         rte_spinlock_lock(&cmng->pool_update_sl);
5981         pool->index = cmng->n_valid;
5982         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5983                 mlx5_free(pool);
5984                 rte_spinlock_unlock(&cmng->pool_update_sl);
5985                 return NULL;
5986         }
5987         cmng->pools[pool->index] = pool;
5988         cmng->n_valid++;
5989         if (unlikely(fallback)) {
5990                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5991
5992                 if (base < cmng->min_id)
5993                         cmng->min_id = base;
5994                 if (base > cmng->max_id)
5995                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5996                 cmng->last_pool_idx = pool->index;
5997         }
5998         rte_spinlock_unlock(&cmng->pool_update_sl);
5999         return pool;
6000 }
6001
6002 /**
6003  * Prepare a new counter and/or a new counter pool.
6004  *
6005  * @param[in] dev
6006  *   Pointer to the Ethernet device structure.
6007  * @param[out] cnt_free
6008  *   Where to put the pointer to a new counter.
6009  * @param[in] age
6010  *   Whether the pool is for counters that were allocated for aging.
6011  *
6012  * @return
6013  *   The counter pool pointer and @p cnt_free is set on success,
6014  *   NULL otherwise and rte_errno is set.
6015  */
6016 static struct mlx5_flow_counter_pool *
6017 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6018                              struct mlx5_flow_counter **cnt_free,
6019                              uint32_t age)
6020 {
6021         struct mlx5_priv *priv = dev->data->dev_private;
6022         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6023         struct mlx5_flow_counter_pool *pool;
6024         struct mlx5_counters tmp_tq;
6025         struct mlx5_devx_obj *dcs = NULL;
6026         struct mlx5_flow_counter *cnt;
6027         enum mlx5_counter_type cnt_type =
6028                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6029         bool fallback = priv->sh->cmng.counter_fallback;
6030         uint32_t i;
6031
6032         if (fallback) {
6033                 /* bulk_bitmap must be 0 for single counter allocation. */
6034                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
6035                 if (!dcs)
6036                         return NULL;
6037                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6038                 if (!pool) {
6039                         pool = flow_dv_pool_create(dev, dcs, age);
6040                         if (!pool) {
6041                                 mlx5_devx_cmd_destroy(dcs);
6042                                 return NULL;
6043                         }
6044                 }
6045                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6046                 cnt = MLX5_POOL_GET_CNT(pool, i);
6047                 cnt->pool = pool;
6048                 cnt->dcs_when_free = dcs;
6049                 *cnt_free = cnt;
6050                 return pool;
6051         }
6052         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
6053         if (!dcs) {
6054                 rte_errno = ENODATA;
6055                 return NULL;
6056         }
6057         pool = flow_dv_pool_create(dev, dcs, age);
6058         if (!pool) {
6059                 mlx5_devx_cmd_destroy(dcs);
6060                 return NULL;
6061         }
6062         TAILQ_INIT(&tmp_tq);
6063         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6064                 cnt = MLX5_POOL_GET_CNT(pool, i);
6065                 cnt->pool = pool;
6066                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6067         }
6068         rte_spinlock_lock(&cmng->csl[cnt_type]);
6069         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6070         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6071         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6072         (*cnt_free)->pool = pool;
6073         return pool;
6074 }
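
/*
 * Editorial note (assumption, not stated in this file): the 0x4 passed to
 * mlx5_devx_cmd_flow_counter_alloc() above is taken to be the bulk size in
 * units of 128 counters, so a single DevX object backs a whole pool's worth
 * of counters (4 * 128 = 512, presumably MLX5_COUNTERS_PER_POOL), while the
 * fallback path requests single-counter objects with bulk size 0.
 */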
6075
6076 /**
6077  * Allocate a flow counter.
6078  *
6079  * @param[in] dev
6080  *   Pointer to the Ethernet device structure.
6081  * @param[in] age
6082  *   Whether the counter was allocated for aging.
6083  *
6084  * @return
6085  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6086  */
6087 static uint32_t
6088 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6089 {
6090         struct mlx5_priv *priv = dev->data->dev_private;
6091         struct mlx5_flow_counter_pool *pool = NULL;
6092         struct mlx5_flow_counter *cnt_free = NULL;
6093         bool fallback = priv->sh->cmng.counter_fallback;
6094         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6095         enum mlx5_counter_type cnt_type =
6096                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6097         uint32_t cnt_idx;
6098
6099         if (!priv->config.devx) {
6100                 rte_errno = ENOTSUP;
6101                 return 0;
6102         }
6103         /* Get a free counter from the container. */
6104         rte_spinlock_lock(&cmng->csl[cnt_type]);
6105         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6106         if (cnt_free)
6107                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6108         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6109         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6110                 goto err;
6111         pool = cnt_free->pool;
6112         if (fallback)
6113                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6114         /* Create a DV counter action only on first-time usage. */
6115         if (!cnt_free->action) {
6116                 uint16_t offset;
6117                 struct mlx5_devx_obj *dcs;
6118                 int ret;
6119
6120                 if (!fallback) {
6121                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6122                         dcs = pool->min_dcs;
6123                 } else {
6124                         offset = 0;
6125                         dcs = cnt_free->dcs_when_free;
6126                 }
6127                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6128                                                             &cnt_free->action);
6129                 if (ret) {
6130                         rte_errno = errno;
6131                         goto err;
6132                 }
6133         }
6134         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6135                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6136         /* Update the counter reset values. */
6137         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6138                                  &cnt_free->bytes))
6139                 goto err;
6140         if (!fallback && !priv->sh->cmng.query_thread_on)
6141                 /* Start the asynchronous batch query by the host thread. */
6142                 mlx5_set_query_alarm(priv->sh);
6143         /*
6144          * When the count action is not shared by ID, the shared_info
6145          * field holds the indirect action API's reference count.
6146          * When the counter action is shared neither by ID nor via the
6147          * indirect action API, the reference count must be 1.
6148          */
6149         cnt_free->shared_info.refcnt = 1;
6150         return cnt_idx;
6151 err:
6152         if (cnt_free) {
6153                 cnt_free->pool = pool;
6154                 if (fallback)
6155                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6156                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6157                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6158                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6159         }
6160         return 0;
6161 }
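
/*
 * Editorial sketch, assuming the 1-based index packing implied by the
 * (counter - 1) % MLX5_COUNTERS_PER_POOL decoding used further below:
 * index 0 is reserved to mean "no counter". The helper name is
 * illustrative, not the driver's macro.
 */
static inline uint32_t
example_make_cnt_idx(uint32_t pool_idx, uint32_t offset)
{
        return pool_idx * MLX5_COUNTERS_PER_POOL + offset + 1;
}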
6162
6163 /**
6164  * Allocate a shared flow counter.
6165  *
6166  * @param[in] ctx
6167  *   Pointer to the shared counter configuration.
6168  * @param[out] data
6169  *   Pointer to save the allocated counter index.
6170  *
6171  * @return
6172  *   0 on success, otherwise a negative errno value and rte_errno is set.
6173  */
6175 static int32_t
6176 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
6177 {
6178         struct mlx5_shared_counter_conf *conf = ctx;
6179         struct rte_eth_dev *dev = conf->dev;
6180         struct mlx5_flow_counter *cnt;
6181
6182         data->dword = flow_dv_counter_alloc(dev, 0);
6183         if (!data->dword)
6184                 return -rte_errno;
6185         data->dword |= MLX5_CNT_SHARED_OFFSET;
6184         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6185         cnt->shared_info.id = conf->id;
6186         return 0;
6187 }
6188
6189 /**
6190  * Get a shared flow counter.
6191  *
6192  * @param[in] dev
6193  *   Pointer to the Ethernet device structure.
6194  * @param[in] id
6195  *   Counter identifier.
6196  *
6197  * @return
6198  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6199  */
6200 static uint32_t
6201 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6202 {
6203         struct mlx5_priv *priv = dev->data->dev_private;
6204         struct mlx5_shared_counter_conf conf = {
6205                 .dev = dev,
6206                 .id = id,
6207         };
6208         union mlx5_l3t_data data = {
6209                 .dword = 0,
6210         };
6211
6212         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6213                                flow_dv_counter_alloc_shared_cb, &conf);
6214         return data.dword;
6215 }
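
/*
 * Editorial note on the pattern above (a hedged reading, not a spec of
 * mlx5_l3t_prepare_entry()): the three-level table keyed by the shared
 * counter ID is assumed to hand back an existing entry when one is present
 * and to invoke the callback to allocate a fresh counter otherwise, so
 * concurrent requests for the same ID converge on one index in data.dword.
 */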
6216
6217 /**
6218  * Get age param from counter index.
6219  *
6220  * @param[in] dev
6221  *   Pointer to the Ethernet device structure.
6222  * @param[in] counter
6223  *   Index to the counter handle.
6224  *
6225  * @return
6226  *   The aging parameter specified for the counter index.
6227  */
6228 static struct mlx5_age_param*
6229 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6230                                 uint32_t counter)
6231 {
6232         struct mlx5_flow_counter *cnt;
6233         struct mlx5_flow_counter_pool *pool = NULL;
6234
6235         flow_dv_counter_get_by_idx(dev, counter, &pool);
6236         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6237         cnt = MLX5_POOL_GET_CNT(pool, counter);
6238         return MLX5_CNT_TO_AGE(cnt);
6239 }
6240
6241 /**
6242  * Remove a flow counter from aged counter list.
6243  *
6244  * @param[in] dev
6245  *   Pointer to the Ethernet device structure.
6246  * @param[in] counter
6247  *   Index to the counter handle.
6248  * @param[in] cnt
6249  *   Pointer to the counter handle.
6250  */
6251 static void
6252 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6253                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6254 {
6255         struct mlx5_age_info *age_info;
6256         struct mlx5_age_param *age_param;
6257         struct mlx5_priv *priv = dev->data->dev_private;
6258         uint16_t expected = AGE_CANDIDATE;
6259
6260         age_info = GET_PORT_AGE_INFO(priv);
6261         age_param = flow_dv_counter_idx_get_age(dev, counter);
6262         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6263                                          AGE_FREE, false, __ATOMIC_RELAXED,
6264                                          __ATOMIC_RELAXED)) {
6265                 /*
6266                  * The lock is needed even if the age timed out,
6267                  * since the counter may still be in process.
6268                  */
6269                 rte_spinlock_lock(&age_info->aged_sl);
6270                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6271                 rte_spinlock_unlock(&age_info->aged_sl);
6272                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6273         }
6274 }
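
/*
 * Editorial note (assumed state machine): a counter under aging is normally
 * in AGE_CANDIDATE, so a successful CAS above simply marks it AGE_FREE. A
 * failed CAS is taken to mean the aging thread already moved the counter to
 * the aged list, so it must also be unlinked from that list under the lock
 * before being freed.
 */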
6275
6276 /**
6277  * Release a flow counter.
6278  *
6279  * @param[in] dev
6280  *   Pointer to the Ethernet device structure.
6281  * @param[in] counter
6282  *   Index to the counter handle.
6283  */
6284 static void
6285 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6286 {
6287         struct mlx5_priv *priv = dev->data->dev_private;
6288         struct mlx5_flow_counter_pool *pool = NULL;
6289         struct mlx5_flow_counter *cnt;
6290         enum mlx5_counter_type cnt_type;
6291
6292         if (!counter)
6293                 return;
6294         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6295         MLX5_ASSERT(pool);
6296         if (pool->is_aged) {
6297                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6298         } else {
6299                 /*
6300                  * If the counter action is shared by ID, l3t_clear_entry
6301                  * decrements its reference count. If the action is still
6302                  * referenced after the decrement, the function returns
6303                  * here and does not release it.
6304                  */
6305                 if (IS_LEGACY_SHARED_CNT(counter) &&
6306                     mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
6307                                          cnt->shared_info.id))
6308                         return;
6309                 /*
6310                  * If the counter action is shared via the indirect action
6311                  * API, the atomic operation decrements its reference count;
6312                  * if the action is still referenced afterwards, the function
6313                  * returns here and does not release it.
6314                  * When the action is shared neither by ID nor via the
6315                  * indirect action API, the count is 1 before the decrement,
6316                  * so this condition fails and the counter is released below.
6317                  */
6318                 if (!IS_LEGACY_SHARED_CNT(counter) &&
6319                     __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6320                                        __ATOMIC_RELAXED))
6321                         return;
6322         }
6323         cnt->pool = pool;
6324         /*
6325          * Put the counter back on a list to be updated, in non-fallback
6326          * mode. Two lists are used alternately: while one is being
6327          * queried, freed counters go to the other, selected by the pool
6328          * query_gen value. After the query finishes, that list is
6329          * concatenated to the global container counter list. The lists
6330          * are switched when a query starts, so the query callback and
6331          * the release function never operate on the same list.
6332          */
6333         if (!priv->sh->cmng.counter_fallback) {
6334                 rte_spinlock_lock(&pool->csl);
6335                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6336                 rte_spinlock_unlock(&pool->csl);
6337         } else {
6338                 cnt->dcs_when_free = cnt->dcs_when_active;
6339                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6340                                            MLX5_COUNTER_TYPE_ORIGIN;
6341                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6342                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6343                                   cnt, next);
6344                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6345         }
6346 }
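
/*
 * Editorial sketch of the generation flip described above. The batch query
 * completion handler lives outside this excerpt; based on the comment it
 * presumably drains the finished generation into the global container list
 * along these lines (names here are illustrative):
 */
static void
example_pool_query_done(struct mlx5_flow_counter_mng *cmng,
                        struct mlx5_flow_counter_pool *pool,
                        enum mlx5_counter_type cnt_type)
{
        uint32_t done_gen = pool->query_gen;

        rte_spinlock_lock(&pool->csl);
        /* New frees now land on the other per-pool list. */
        pool->query_gen = !done_gen;
        rte_spinlock_unlock(&pool->csl);
        rte_spinlock_lock(&cmng->csl[cnt_type]);
        TAILQ_CONCAT(&cmng->counters[cnt_type], &pool->counters[done_gen],
                     next);
        rte_spinlock_unlock(&cmng->csl[cnt_type]);
}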
6347
6348 /**
6349  * Resize a meter id container.
6350  *
6351  * @param[in] dev
6352  *   Pointer to the Ethernet device structure.
6353  *
6354  * @return
6355  *   0 on success, otherwise negative errno value and rte_errno is set.
6356  */
6357 static int
6358 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6359 {
6360         struct mlx5_priv *priv = dev->data->dev_private;
6361         struct mlx5_aso_mtr_pools_mng *pools_mng =
6362                                 &priv->sh->mtrmng->pools_mng;
6363         void *old_pools = pools_mng->pools;
6364         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6365         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6366         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6367
6368         if (!pools) {
6369                 rte_errno = ENOMEM;
6370                 return -ENOMEM;
6371         }
6372         if (!pools_mng->n &&
6373             mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6374                 mlx5_free(pools);
6375                 return -ENOMEM;
6376         }
6377         if (old_pools)
6378                 memcpy(pools, old_pools, pools_mng->n *
6379                                        sizeof(struct mlx5_aso_mtr_pool *));
6380         pools_mng->n = resize;
6381         pools_mng->pools = pools;
6382         if (old_pools)
6383                 mlx5_free(old_pools);
6384         return 0;
6385 }
6386
6387 /**
6388  * Prepare a new meter and/or a new meter pool.
6389  *
6390  * @param[in] dev
6391  *   Pointer to the Ethernet device structure.
6392  * @param[out] mtr_free
6393  *   Where to put the pointer of a new meter.
6394  *
6395  * @return
6396  *   The meter pool pointer and @p mtr_free is set on success,
6397  *   NULL otherwise and rte_errno is set.
6398  */
6399 static struct mlx5_aso_mtr_pool *
6400 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6401                              struct mlx5_aso_mtr **mtr_free)
6402 {
6403         struct mlx5_priv *priv = dev->data->dev_private;
6404         struct mlx5_aso_mtr_pools_mng *pools_mng =
6405                                 &priv->sh->mtrmng->pools_mng;
6406         struct mlx5_aso_mtr_pool *pool = NULL;
6407         struct mlx5_devx_obj *dcs = NULL;
6408         uint32_t i;
6409         uint32_t log_obj_size;
6410
6411         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6412         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6413                         priv->sh->pdn, log_obj_size);
6414         if (!dcs) {
6415                 rte_errno = ENODATA;
6416                 return NULL;
6417         }
6418         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6419         if (!pool) {
6420                 rte_errno = ENOMEM;
6421                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6422                 return NULL;
6423         }
6424         pool->devx_obj = dcs;
6425         pool->index = pools_mng->n_valid;
6426         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6427                 mlx5_free(pool);
6428                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6429                 return NULL;
6430         }
6431         pools_mng->pools[pool->index] = pool;
6432         pools_mng->n_valid++;
6433         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6434                 pool->mtrs[i].offset = i;
6435                 LIST_INSERT_HEAD(&pools_mng->meters,
6436                                                 &pool->mtrs[i], next);
6437         }
6438         pool->mtrs[0].offset = 0;
6439         *mtr_free = &pool->mtrs[0];
6440         return pool;
6441 }
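
/*
 * Editorial note (assumption): each ASO flow meter object is taken to host
 * two meters, which would explain why the DevX object above is created with
 * log2(MLX5_ASO_MTRS_PER_POOL >> 1) as its log size while the pool still
 * exposes MLX5_ASO_MTRS_PER_POOL meter slots.
 */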
6442
6443 /**
6444  * Release a flow meter back into the pool.
6445  *
6446  * @param[in] dev
6447  *   Pointer to the Ethernet device structure.
6448  * @param[in] mtr_idx
6449  *   Index to the ASO flow meter.
6450  */
6451 static void
6452 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6453 {
6454         struct mlx5_priv *priv = dev->data->dev_private;
6455         struct mlx5_aso_mtr_pools_mng *pools_mng =
6456                                 &priv->sh->mtrmng->pools_mng;
6457         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6458
6459         MLX5_ASSERT(aso_mtr);
6460         rte_spinlock_lock(&pools_mng->mtrsl);
6461         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6462         aso_mtr->state = ASO_METER_FREE;
6463         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6464         rte_spinlock_unlock(&pools_mng->mtrsl);
6465 }
6466
6467 /**
6468  * Allocate an ASO flow meter.
6469  *
6470  * @param[in] dev
6471  *   Pointer to the Ethernet device structure.
6472  *
6473  * @return
6474  *   Index to ASO flow meter on success, 0 otherwise and rte_errno is set.
6475  */
6476 static uint32_t
6477 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6478 {
6479         struct mlx5_priv *priv = dev->data->dev_private;
6480         struct mlx5_aso_mtr *mtr_free = NULL;
6481         struct mlx5_aso_mtr_pools_mng *pools_mng =
6482                                 &priv->sh->mtrmng->pools_mng;
6483         struct mlx5_aso_mtr_pool *pool;
6484         uint32_t mtr_idx = 0;
6485
6486         if (!priv->config.devx) {
6487                 rte_errno = ENOTSUP;
6488                 return 0;
6489         }
6490         /* Get a free meter from the pool management free list. */
6492         rte_spinlock_lock(&pools_mng->mtrsl);
6493         mtr_free = LIST_FIRST(&pools_mng->meters);
6494         if (mtr_free)
6495                 LIST_REMOVE(mtr_free, next);
6496         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6497                 rte_spinlock_unlock(&pools_mng->mtrsl);
6498                 return 0;
6499         }
6500         mtr_free->state = ASO_METER_WAIT;
6501         rte_spinlock_unlock(&pools_mng->mtrsl);
6502         pool = container_of(mtr_free,
6503                         struct mlx5_aso_mtr_pool,
6504                         mtrs[mtr_free->offset]);
6505         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6506         if (!mtr_free->fm.meter_action) {
6507 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6508                 struct rte_flow_error error;
6509                 uint8_t reg_id;
6510
6511                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6512                 mtr_free->fm.meter_action =
6513                         mlx5_glue->dv_create_flow_action_aso
6514                                                 (priv->sh->rx_domain,
6515                                                  pool->devx_obj->obj,
6516                                                  mtr_free->offset,
6517                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6518                                                  reg_id - REG_C_0);
6519 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6520                 if (!mtr_free->fm.meter_action) {
6521                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6522                         return 0;
6523                 }
6524         }
6525         return mtr_idx;
6526 }
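
/*
 * Editorial sketch, assuming MLX5_MAKE_MTR_IDX packs the pool index and the
 * in-pool offset the same 1-based way as the counter index, so that 0 can
 * keep meaning "no meter". The helper name is illustrative.
 */
static inline uint32_t
example_make_mtr_idx(uint32_t pool_idx, uint32_t offset)
{
        return pool_idx * MLX5_ASO_MTRS_PER_POOL + offset + 1;
}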
6527
6528 /**
6529  * Verify that the @p attributes will be correctly understood by the NIC.
6530  *
6531  * @param[in] dev
6532  *   Pointer to dev struct.
6533  * @param[in] tunnel
6534  *   Pointer to the tunnel offload descriptor, or NULL.
6535  * @param[in] attributes
6536  *   Pointer to flow attributes.
6537  * @param[in] grp_info
6538  *   Pointer to the flow group translation info.
6539  * @param[out] error
6540  *   Pointer to error structure.
6540  *
6541  * @return
6542  *   - 0 on success and non-root table.
6543  *   - 1 on success and root table.
6544  *   - a negative errno value otherwise and rte_errno is set.
6545  */
6546 static int
6547 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6548                             const struct mlx5_flow_tunnel *tunnel,
6549                             const struct rte_flow_attr *attributes,
6550                             const struct flow_grp_info *grp_info,
6551                             struct rte_flow_error *error)
6552 {
6553         struct mlx5_priv *priv = dev->data->dev_private;
6554         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6555         int ret = 0;
6556
6557 #ifndef HAVE_MLX5DV_DR
6558         RTE_SET_USED(tunnel);
6559         RTE_SET_USED(grp_info);
6560         if (attributes->group)
6561                 return rte_flow_error_set(error, ENOTSUP,
6562                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6563                                           NULL,
6564                                           "groups are not supported");
6565 #else
6566         uint32_t table = 0;
6567
6568         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6569                                        grp_info, error);
6570         if (ret)
6571                 return ret;
6572         if (!table)
6573                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6574 #endif
6575         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6576             attributes->priority > lowest_priority)
6577                 return rte_flow_error_set(error, ENOTSUP,
6578                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6579                                           NULL,
6580                                           "priority out of range");
6581         if (attributes->transfer) {
6582                 if (!priv->config.dv_esw_en)
6583                         return rte_flow_error_set
6584                                 (error, ENOTSUP,
6585                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6586                                  "E-Switch dr is not supported");
6587                 if (!(priv->representor || priv->master))
6588                         return rte_flow_error_set
6589                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6590                                  NULL, "E-Switch configuration can only be"
6591                                  " done by a master or a representor device");
6592                 if (attributes->egress)
6593                         return rte_flow_error_set
6594                                 (error, ENOTSUP,
6595                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6596                                  "egress is not supported");
6597         }
6598         if (!(attributes->egress ^ attributes->ingress))
6599                 return rte_flow_error_set(error, ENOTSUP,
6600                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6601                                           "must specify exactly one of "
6602                                           "ingress or egress");
6603         return ret;
6604 }
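
/*
 * Editorial usage sketch of the attribute rules enforced above: exactly one
 * of ingress/egress must be set, and transfer rules may not request egress.
 * A minimal attribute set that would pass, as an illustration:
 */
static const struct rte_flow_attr example_valid_attr = {
        .group = 1,     /* Non-root table. */
        .priority = 0,
        .ingress = 1,   /* Exactly one direction bit. */
};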
6605
6606 static uint16_t
6607 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6608                           const struct rte_flow_item *end)
6609 {
6610         const struct rte_flow_item *item = *head;
6611         uint16_t l3_protocol;
6612
6613         for (; item != end; item++) {
6614                 switch (item->type) {
6615                 default:
6616                         break;
6617                 case RTE_FLOW_ITEM_TYPE_IPV4:
6618                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6619                         goto l3_ok;
6620                 case RTE_FLOW_ITEM_TYPE_IPV6:
6621                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6622                         goto l3_ok;
6623                 case RTE_FLOW_ITEM_TYPE_ETH:
6624                         if (item->mask && item->spec) {
6625                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6626                                                             type, item,
6627                                                             l3_protocol);
6628                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6629                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6630                                         goto l3_ok;
6631                         }
6632                         break;
6633                 case RTE_FLOW_ITEM_TYPE_VLAN:
6634                         if (item->mask && item->spec) {
6635                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6636                                                             inner_type, item,
6637                                                             l3_protocol);
6638                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6639                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6640                                         goto l3_ok;
6641                         }
6642                         break;
6643                 }
6644         }
6645         return 0;
6646 l3_ok:
6647         *head = item;
6648         return l3_protocol;
6649 }
6650
6651 static uint8_t
6652 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6653                           const struct rte_flow_item *end)
6654 {
6655         const struct rte_flow_item *item = *head;
6656         uint8_t l4_protocol;
6657
6658         for (; item != end; item++) {
6659                 switch (item->type) {
6660                 default:
6661                         break;
6662                 case RTE_FLOW_ITEM_TYPE_TCP:
6663                         l4_protocol = IPPROTO_TCP;
6664                         goto l4_ok;
6665                 case RTE_FLOW_ITEM_TYPE_UDP:
6666                         l4_protocol = IPPROTO_UDP;
6667                         goto l4_ok;
6668                 case RTE_FLOW_ITEM_TYPE_IPV4:
6669                         if (item->mask && item->spec) {
6670                                 const struct rte_flow_item_ipv4 *mask, *spec;
6671
6672                                 mask = (typeof(mask))item->mask;
6673                                 spec = (typeof(spec))item->spec;
6674                                 l4_protocol = mask->hdr.next_proto_id &
6675                                               spec->hdr.next_proto_id;
6676                                 if (l4_protocol == IPPROTO_TCP ||
6677                                     l4_protocol == IPPROTO_UDP)
6678                                         goto l4_ok;
6679                         }
6680                         break;
6681                 case RTE_FLOW_ITEM_TYPE_IPV6:
6682                         if (item->mask && item->spec) {
6683                                 const struct rte_flow_item_ipv6 *mask, *spec;
6684                                 mask = (typeof(mask))item->mask;
6685                                 spec = (typeof(spec))item->spec;
6686                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6687                                 if (l4_protocol == IPPROTO_TCP ||
6688                                     l4_protocol == IPPROTO_UDP)
6689                                         goto l4_ok;
6690                         }
6691                         break;
6692                 }
6693         }
6694         return 0;
6695 l4_ok:
6696         *head = item;
6697         return l4_protocol;
6698 }
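
/*
 * Editorial note: the spec & mask conjunction used by both locators above
 * yields the value the rule effectively matches on, so an item with a zero
 * mask never identifies an L3/L4 protocol and the locators keep scanning
 * until the end of the pattern.
 */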
6699
6700 static int
6701 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6702                                 const struct rte_flow_item *rule_items,
6703                                 const struct rte_flow_item *integrity_item,
6704                                 struct rte_flow_error *error)
6705 {
6706         struct mlx5_priv *priv = dev->data->dev_private;
6707         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6708         const struct rte_flow_item_integrity *mask = (typeof(mask))
6709                                                      integrity_item->mask;
6710         const struct rte_flow_item_integrity *spec = (typeof(spec))
6711                                                      integrity_item->spec;
6712         uint32_t protocol;
6713
6714         if (!priv->config.hca_attr.pkt_integrity_match)
6715                 return rte_flow_error_set(error, ENOTSUP,
6716                                           RTE_FLOW_ERROR_TYPE_ITEM,
6717                                           integrity_item,
6718                                           "packet integrity item not supported");
6719         if (!mask)
6720                 mask = &rte_flow_item_integrity_mask;
6721         if (!mlx5_validate_integrity_item(mask))
6722                 return rte_flow_error_set(error, ENOTSUP,
6723                                           RTE_FLOW_ERROR_TYPE_ITEM,
6724                                           integrity_item,
6725                                           "unsupported integrity filter");
6726         if (!spec)
6727                 return rte_flow_error_set(error, ENOTSUP,
6728                                           RTE_FLOW_ERROR_TYPE_ITEM,
6729                                           integrity_item,
6730                                           "no integrity item spec");
6731         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
6732         if (spec->level > 1) {
6728                 if (!tunnel_item)
6729                         return rte_flow_error_set(error, ENOTSUP,
6730                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6731                                                   integrity_item,
6732                                                   "missing tunnel item");
6733                 item = tunnel_item;
6734                 end_item = mlx5_find_end_item(tunnel_item);
6735         } else {
6736                 end_item = tunnel_item ? tunnel_item :
6737                            mlx5_find_end_item(integrity_item);
6738         }
6739         if (mask->l3_ok || mask->ipv4_csum_ok) {
6740                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6741                 if (!protocol)
6742                         return rte_flow_error_set(error, EINVAL,
6743                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6744                                                   integrity_item,
6745                                                   "missing L3 protocol");
6746         }
6747         if (mask->l4_ok || mask->l4_csum_ok) {
6748                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6749                 if (!protocol)
6750                         return rte_flow_error_set(error, EINVAL,
6751                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6752                                                   integrity_item,
6753                                                   "missing L4 protocol");
6754         }
6755         return 0;
6756 }
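
/*
 * Editorial usage sketch (field names per rte_flow_item_integrity; the
 * pattern constraints are the ones validated above): checking the outer L3
 * and IPv4 checksum status requires the pattern to carry a matching L3 item.
 */
static const struct rte_flow_item_integrity example_integrity_spec = {
        .level = 0,             /* 0 or 1: outermost headers. */
        .l3_ok = 1,
        .ipv4_csum_ok = 1,
};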
6757
6758 /**
6759  * Internal validation function for both actions and items.
6760  *
6761  * @param[in] dev
6762  *   Pointer to the rte_eth_dev structure.
6763  * @param[in] attr
6764  *   Pointer to the flow attributes.
6765  * @param[in] items
6766  *   Pointer to the list of items.
6767  * @param[in] actions
6768  *   Pointer to the list of actions.
6769  * @param[in] external
6770  *   This flow rule is created by a request external to the PMD.
6771  * @param[in] hairpin
6772  *   Number of hairpin TX actions, 0 means classic flow.
6773  * @param[out] error
6774  *   Pointer to the error structure.
6775  *
6776  * @return
6777  *   0 on success, a negative errno value otherwise and rte_errno is set.
6778  */
6779 static int
6780 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6781                  const struct rte_flow_item items[],
6782                  const struct rte_flow_action actions[],
6783                  bool external, int hairpin, struct rte_flow_error *error)
6784 {
6785         int ret;
6786         uint64_t action_flags = 0;
6787         uint64_t item_flags = 0;
6788         uint64_t last_item = 0;
6789         uint8_t next_protocol = 0xff;
6790         uint16_t ether_type = 0;
6791         int actions_n = 0;
6792         uint8_t item_ipv6_proto = 0;
6793         int fdb_mirror_limit = 0;
6794         int modify_after_mirror = 0;
6795         const struct rte_flow_item *geneve_item = NULL;
6796         const struct rte_flow_item *gre_item = NULL;
6797         const struct rte_flow_item *gtp_item = NULL;
6798         const struct rte_flow_action_raw_decap *decap;
6799         const struct rte_flow_action_raw_encap *encap;
6800         const struct rte_flow_action_rss *rss = NULL;
6801         const struct rte_flow_action_rss *sample_rss = NULL;
6802         const struct rte_flow_action_count *sample_count = NULL;
6803         const struct rte_flow_item_tcp nic_tcp_mask = {
6804                 .hdr = {
6805                         .tcp_flags = 0xFF,
6806                         .src_port = RTE_BE16(UINT16_MAX),
6807                         .dst_port = RTE_BE16(UINT16_MAX),
6808                 }
6809         };
6810         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6811                 .hdr = {
6812                         .src_addr =
6813                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6814                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6815                         .dst_addr =
6816                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6817                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6818                         .vtc_flow = RTE_BE32(0xffffffff),
6819                         .proto = 0xff,
6820                         .hop_limits = 0xff,
6821                 },
6822                 .has_frag_ext = 1,
6823         };
6824         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6825                 .hdr = {
6826                         .common = {
6827                                 .u32 =
6828                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6829                                         .type = 0xFF,
6830                                         }).u32),
6831                         },
6832                         .dummy[0] = 0xffffffff,
6833                 },
6834         };
6835         struct mlx5_priv *priv = dev->data->dev_private;
6836         struct mlx5_dev_config *dev_conf = &priv->config;
6837         uint16_t queue_index = 0xFFFF;
6838         const struct rte_flow_item_vlan *vlan_m = NULL;
6839         uint32_t rw_act_num = 0;
6840         uint64_t is_root;
6841         const struct mlx5_flow_tunnel *tunnel;
6842         enum mlx5_tof_rule_type tof_rule_type;
6843         struct flow_grp_info grp_info = {
6844                 .external = !!external,
6845                 .transfer = !!attr->transfer,
6846                 .fdb_def_rule = !!priv->fdb_def_rule,
6847                 .std_tbl_fix = true,
6848         };
6849         const struct rte_eth_hairpin_conf *conf;
6850         const struct rte_flow_item *rule_items = items;
6851         const struct rte_flow_item *port_id_item = NULL;
6852         bool def_policy = false;
6853
6854         if (items == NULL)
6855                 return -1;
6856         tunnel = is_tunnel_offload_active(dev) ?
6857                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6858         if (tunnel) {
6859                 if (priv->representor)
6860                         return rte_flow_error_set
6861                                 (error, ENOTSUP,
6862                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6863                                  NULL, "decap not supported for VF representor");
6864                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6865                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6866                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6867                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6868                                         MLX5_FLOW_ACTION_DECAP;
6869                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6870                                         (dev, attr, tunnel, tof_rule_type);
6871         }
6872         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6873         if (ret < 0)
6874                 return ret;
6875         is_root = (uint64_t)ret;
6876         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6877                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6878                 int type = items->type;
6879
6880                 if (!mlx5_flow_os_item_supported(type))
6881                         return rte_flow_error_set(error, ENOTSUP,
6882                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6883                                                   NULL, "item not supported");
6884                 switch (type) {
6885                 case RTE_FLOW_ITEM_TYPE_VOID:
6886                         break;
6887                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6888                         ret = flow_dv_validate_item_port_id
6889                                         (dev, items, attr, item_flags, error);
6890                         if (ret < 0)
6891                                 return ret;
6892                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6893                         port_id_item = items;
6894                         break;
6895                 case RTE_FLOW_ITEM_TYPE_ETH:
6896                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6897                                                           true, error);
6898                         if (ret < 0)
6899                                 return ret;
6900                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6901                                              MLX5_FLOW_LAYER_OUTER_L2;
6902                         if (items->mask != NULL && items->spec != NULL) {
6903                                 ether_type =
6904                                         ((const struct rte_flow_item_eth *)
6905                                          items->spec)->type;
6906                                 ether_type &=
6907                                         ((const struct rte_flow_item_eth *)
6908                                          items->mask)->type;
6909                                 ether_type = rte_be_to_cpu_16(ether_type);
6910                         } else {
6911                                 ether_type = 0;
6912                         }
6913                         break;
6914                 case RTE_FLOW_ITEM_TYPE_VLAN:
6915                         ret = flow_dv_validate_item_vlan(items, item_flags,
6916                                                          dev, error);
6917                         if (ret < 0)
6918                                 return ret;
6919                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6920                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6921                         if (items->mask != NULL && items->spec != NULL) {
6922                                 ether_type =
6923                                         ((const struct rte_flow_item_vlan *)
6924                                          items->spec)->inner_type;
6925                                 ether_type &=
6926                                         ((const struct rte_flow_item_vlan *)
6927                                          items->mask)->inner_type;
6928                                 ether_type = rte_be_to_cpu_16(ether_type);
6929                         } else {
6930                                 ether_type = 0;
6931                         }
6932                         /* Store outer VLAN mask for of_push_vlan action. */
6933                         if (!tunnel)
6934                                 vlan_m = items->mask;
6935                         break;
6936                 case RTE_FLOW_ITEM_TYPE_IPV4:
6937                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6938                                                   &item_flags, &tunnel);
6939                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6940                                                          last_item, ether_type,
6941                                                          error);
6942                         if (ret < 0)
6943                                 return ret;
6944                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6945                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6946                         if (items->mask != NULL &&
6947                             ((const struct rte_flow_item_ipv4 *)
6948                              items->mask)->hdr.next_proto_id) {
6949                                 next_protocol =
6950                                         ((const struct rte_flow_item_ipv4 *)
6951                                          (items->spec))->hdr.next_proto_id;
6952                                 next_protocol &=
6953                                         ((const struct rte_flow_item_ipv4 *)
6954                                          (items->mask))->hdr.next_proto_id;
6955                         } else {
6956                                 /* Reset for inner layer. */
6957                                 next_protocol = 0xff;
6958                         }
6959                         break;
6960                 case RTE_FLOW_ITEM_TYPE_IPV6:
6961                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6962                                                   &item_flags, &tunnel);
6963                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6964                                                            last_item,
6965                                                            ether_type,
6966                                                            &nic_ipv6_mask,
6967                                                            error);
6968                         if (ret < 0)
6969                                 return ret;
6970                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6971                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6972                         if (items->mask != NULL &&
6973                             ((const struct rte_flow_item_ipv6 *)
6974                              items->mask)->hdr.proto) {
6975                                 item_ipv6_proto =
6976                                         ((const struct rte_flow_item_ipv6 *)
6977                                          items->spec)->hdr.proto;
6978                                 next_protocol =
6979                                         ((const struct rte_flow_item_ipv6 *)
6980                                          items->spec)->hdr.proto;
6981                                 next_protocol &=
6982                                         ((const struct rte_flow_item_ipv6 *)
6983                                          items->mask)->hdr.proto;
6984                         } else {
6985                                 /* Reset for inner layer. */
6986                                 next_protocol = 0xff;
6987                         }
6988                         break;
6989                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6990                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6991                                                                   item_flags,
6992                                                                   error);
6993                         if (ret < 0)
6994                                 return ret;
6995                         last_item = tunnel ?
6996                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6997                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6998                         if (items->mask != NULL &&
6999                             ((const struct rte_flow_item_ipv6_frag_ext *)
7000                              items->mask)->hdr.next_header) {
7001                                 next_protocol =
7002                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7003                                  items->spec)->hdr.next_header;
7004                                 next_protocol &=
7005                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7006                                  items->mask)->hdr.next_header;
7007                         } else {
7008                                 /* Reset for inner layer. */
7009                                 next_protocol = 0xff;
7010                         }
7011                         break;
7012                 case RTE_FLOW_ITEM_TYPE_TCP:
7013                         ret = mlx5_flow_validate_item_tcp
7014                                                 (items, item_flags,
7015                                                  next_protocol,
7016                                                  &nic_tcp_mask,
7017                                                  error);
7018                         if (ret < 0)
7019                                 return ret;
7020                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7021                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7022                         break;
7023                 case RTE_FLOW_ITEM_TYPE_UDP:
7024                         ret = mlx5_flow_validate_item_udp(items, item_flags,
7025                                                           next_protocol,
7026                                                           error);
7027                         if (ret < 0)
7028                                 return ret;
7029                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7030                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7031                         break;
7032                 case RTE_FLOW_ITEM_TYPE_GRE:
7033                         ret = mlx5_flow_validate_item_gre(items, item_flags,
7034                                                           next_protocol, error);
7035                         if (ret < 0)
7036                                 return ret;
7037                         gre_item = items;
7038                         last_item = MLX5_FLOW_LAYER_GRE;
7039                         break;
7040                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7041                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7042                                                             next_protocol,
7043                                                             error);
7044                         if (ret < 0)
7045                                 return ret;
7046                         last_item = MLX5_FLOW_LAYER_NVGRE;
7047                         break;
7048                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7049                         ret = mlx5_flow_validate_item_gre_key
7050                                 (items, item_flags, gre_item, error);
7051                         if (ret < 0)
7052                                 return ret;
7053                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7054                         break;
7055                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7056                         ret = mlx5_flow_validate_item_vxlan(dev, items,
7057                                                             item_flags, attr,
7058                                                             error);
7059                         if (ret < 0)
7060                                 return ret;
7061                         last_item = MLX5_FLOW_LAYER_VXLAN;
7062                         break;
7063                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7064                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7065                                                                 item_flags, dev,
7066                                                                 error);
7067                         if (ret < 0)
7068                                 return ret;
7069                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7070                         break;
7071                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7072                         ret = mlx5_flow_validate_item_geneve(items,
7073                                                              item_flags, dev,
7074                                                              error);
7075                         if (ret < 0)
7076                                 return ret;
7077                         geneve_item = items;
7078                         last_item = MLX5_FLOW_LAYER_GENEVE;
7079                         break;
7080                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7081                         ret = mlx5_flow_validate_item_geneve_opt(items,
7082                                                                  last_item,
7083                                                                  geneve_item,
7084                                                                  dev,
7085                                                                  error);
7086                         if (ret < 0)
7087                                 return ret;
7088                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7089                         break;
7090                 case RTE_FLOW_ITEM_TYPE_MPLS:
7091                         ret = mlx5_flow_validate_item_mpls(dev, items,
7092                                                            item_flags,
7093                                                            last_item, error);
7094                         if (ret < 0)
7095                                 return ret;
7096                         last_item = MLX5_FLOW_LAYER_MPLS;
7097                         break;
7099                 case RTE_FLOW_ITEM_TYPE_MARK:
7100                         ret = flow_dv_validate_item_mark(dev, items, attr,
7101                                                          error);
7102                         if (ret < 0)
7103                                 return ret;
7104                         last_item = MLX5_FLOW_ITEM_MARK;
7105                         break;
7106                 case RTE_FLOW_ITEM_TYPE_META:
7107                         ret = flow_dv_validate_item_meta(dev, items, attr,
7108                                                          error);
7109                         if (ret < 0)
7110                                 return ret;
7111                         last_item = MLX5_FLOW_ITEM_METADATA;
7112                         break;
7113                 case RTE_FLOW_ITEM_TYPE_ICMP:
7114                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7115                                                            next_protocol,
7116                                                            error);
7117                         if (ret < 0)
7118                                 return ret;
7119                         last_item = MLX5_FLOW_LAYER_ICMP;
7120                         break;
7121                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7122                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7123                                                             next_protocol,
7124                                                             error);
7125                         if (ret < 0)
7126                                 return ret;
7127                         item_ipv6_proto = IPPROTO_ICMPV6;
7128                         last_item = MLX5_FLOW_LAYER_ICMP6;
7129                         break;
7130                 case RTE_FLOW_ITEM_TYPE_TAG:
7131                         ret = flow_dv_validate_item_tag(dev, items,
7132                                                         attr, error);
7133                         if (ret < 0)
7134                                 return ret;
7135                         last_item = MLX5_FLOW_ITEM_TAG;
7136                         break;
7137                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7138                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7139                         break;
7140                 case RTE_FLOW_ITEM_TYPE_GTP:
7141                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7142                                                         error);
7143                         if (ret < 0)
7144                                 return ret;
7145                         gtp_item = items;
7146                         last_item = MLX5_FLOW_LAYER_GTP;
7147                         break;
7148                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7149                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7150                                                             gtp_item, attr,
7151                                                             error);
7152                         if (ret < 0)
7153                                 return ret;
7154                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7155                         break;
7156                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7157                         /* Capacity will be checked in the translate stage. */
7158                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7159                                                             last_item,
7160                                                             ether_type,
7161                                                             &nic_ecpri_mask,
7162                                                             error);
7163                         if (ret < 0)
7164                                 return ret;
7165                         last_item = MLX5_FLOW_LAYER_ECPRI;
7166                         break;
7167                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7168                         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
7169                                 return rte_flow_error_set
7170                                         (error, ENOTSUP,
7171                                          RTE_FLOW_ERROR_TYPE_ITEM,
7172                                          NULL, "multiple integrity items not supported");
7173                         ret = flow_dv_validate_item_integrity(dev, rule_items,
7174                                                               items, error);
7175                         if (ret < 0)
7176                                 return ret;
7177                         last_item = MLX5_FLOW_ITEM_INTEGRITY;
7178                         break;
7179                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7180                         ret = flow_dv_validate_item_aso_ct(dev, items,
7181                                                            &item_flags, error);
7182                         if (ret < 0)
7183                                 return ret;
7184                         break;
7185                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7186                         /*
7187                          * Tunnel offload item was processed before;
7188                          * list it here as a supported type.
7189                          */
7189                         break;
7190                 default:
7191                         return rte_flow_error_set(error, ENOTSUP,
7192                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7193                                                   NULL, "item not supported");
7194                 }
7195                 item_flags |= last_item;
7196         }
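
        /*
         * Editorial note: item_flags now holds the accumulated layer
         * bits of the whole pattern; the action loop below validates
         * every action against these flags.
         */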
7197         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7198                 int type = actions->type;
7199                 bool shared_count = false;
7200
7201                 if (!mlx5_flow_os_action_supported(type))
7202                         return rte_flow_error_set(error, ENOTSUP,
7203                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7204                                                   actions,
7205                                                   "action not supported");
7206                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7207                         return rte_flow_error_set(error, ENOTSUP,
7208                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7209                                                   actions, "too many actions");
7210                 if (action_flags &
7211                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7212                         return rte_flow_error_set(error, ENOTSUP,
7213                                 RTE_FLOW_ERROR_TYPE_ACTION,
7214                                 NULL, "meter action with policy "
7215                                 "must be the last action");
7216                 switch (type) {
7217                 case RTE_FLOW_ACTION_TYPE_VOID:
7218                         break;
7219                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7220                         ret = flow_dv_validate_action_port_id(dev,
7221                                                               action_flags,
7222                                                               actions,
7223                                                               attr,
7224                                                               error);
7225                         if (ret)
7226                                 return ret;
7227                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7228                         ++actions_n;
7229                         break;
7230                 case RTE_FLOW_ACTION_TYPE_FLAG:
7231                         ret = flow_dv_validate_action_flag(dev, action_flags,
7232                                                            attr, error);
7233                         if (ret < 0)
7234                                 return ret;
7235                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7236                                 /* Count all modify-header actions as one. */
7237                                 if (!(action_flags &
7238                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7239                                         ++actions_n;
7240                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7241                                                 MLX5_FLOW_ACTION_MARK_EXT;
7242                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7243                                         modify_after_mirror = 1;
7244
7245                         } else {
7246                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7247                                 ++actions_n;
7248                         }
7249                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7250                         break;
7251                 case RTE_FLOW_ACTION_TYPE_MARK:
7252                         ret = flow_dv_validate_action_mark(dev, actions,
7253                                                            action_flags,
7254                                                            attr, error);
7255                         if (ret < 0)
7256                                 return ret;
7257                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7258                                 /* Count all modify-header actions as one. */
7259                                 if (!(action_flags &
7260                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7261                                         ++actions_n;
7262                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7263                                                 MLX5_FLOW_ACTION_MARK_EXT;
7264                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7265                                         modify_after_mirror = 1;
7266                         } else {
7267                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7268                                 ++actions_n;
7269                         }
7270                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7271                         break;
7272                 case RTE_FLOW_ACTION_TYPE_SET_META:
7273                         ret = flow_dv_validate_action_set_meta(dev, actions,
7274                                                                action_flags,
7275                                                                attr, error);
7276                         if (ret < 0)
7277                                 return ret;
7278                         /* Count all modify-header actions as one action. */
7279                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7280                                 ++actions_n;
7281                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7282                                 modify_after_mirror = 1;
7283                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7284                         rw_act_num += MLX5_ACT_NUM_SET_META;
7285                         break;
7286                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7287                         ret = flow_dv_validate_action_set_tag(dev, actions,
7288                                                               action_flags,
7289                                                               attr, error);
7290                         if (ret < 0)
7291                                 return ret;
7292                         /* Count all modify-header actions as one action. */
7293                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7294                                 ++actions_n;
7295                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7296                                 modify_after_mirror = 1;
7297                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7298                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7299                         break;
7300                 case RTE_FLOW_ACTION_TYPE_DROP:
7301                         ret = mlx5_flow_validate_action_drop(action_flags,
7302                                                              attr, error);
7303                         if (ret < 0)
7304                                 return ret;
7305                         action_flags |= MLX5_FLOW_ACTION_DROP;
7306                         ++actions_n;
7307                         break;
7308                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7309                         ret = mlx5_flow_validate_action_queue(actions,
7310                                                               action_flags, dev,
7311                                                               attr, error);
7312                         if (ret < 0)
7313                                 return ret;
7314                         queue_index = ((const struct rte_flow_action_queue *)
7315                                                         (actions->conf))->index;
7316                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7317                         ++actions_n;
7318                         break;
7319                 case RTE_FLOW_ACTION_TYPE_RSS:
7320                         rss = actions->conf;
7321                         ret = mlx5_flow_validate_action_rss(actions,
7322                                                             action_flags, dev,
7323                                                             attr, item_flags,
7324                                                             error);
7325                         if (ret < 0)
7326                                 return ret;
7327                         if (rss && sample_rss &&
7328                             (sample_rss->level != rss->level ||
7329                             sample_rss->types != rss->types))
7330                                 return rte_flow_error_set(error, ENOTSUP,
7331                                         RTE_FLOW_ERROR_TYPE_ACTION,
7332                                         NULL,
7333                                         "Cannot use different RSS types "
7334                                         "or levels in the same flow");
7335                         if (rss != NULL && rss->queue_num)
7336                                 queue_index = rss->queue[0];
7337                         action_flags |= MLX5_FLOW_ACTION_RSS;
7338                         ++actions_n;
7339                         break;
7340                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7341                         ret =
7342                         mlx5_flow_validate_action_default_miss(action_flags,
7343                                         attr, error);
7344                         if (ret < 0)
7345                                 return ret;
7346                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7347                         ++actions_n;
7348                         break;
7349                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7350                 case RTE_FLOW_ACTION_TYPE_COUNT:
7351                         shared_count = is_shared_action_count(actions);
7352                         ret = flow_dv_validate_action_count(dev, shared_count,
7353                                                             action_flags,
7354                                                             error);
7355                         if (ret < 0)
7356                                 return ret;
7357                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7358                         ++actions_n;
7359                         break;
7360                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7361                         if (flow_dv_validate_action_pop_vlan(dev,
7362                                                              action_flags,
7363                                                              actions,
7364                                                              item_flags, attr,
7365                                                              error))
7366                                 return -rte_errno;
7367                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7368                                 modify_after_mirror = 1;
7369                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7370                         ++actions_n;
7371                         break;
7372                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7373                         ret = flow_dv_validate_action_push_vlan(dev,
7374                                                                 action_flags,
7375                                                                 vlan_m,
7376                                                                 actions, attr,
7377                                                                 error);
7378                         if (ret < 0)
7379                                 return ret;
7380                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7381                                 modify_after_mirror = 1;
7382                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7383                         ++actions_n;
7384                         break;
7385                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7386                         ret = flow_dv_validate_action_set_vlan_pcp
7387                                                 (action_flags, actions, error);
7388                         if (ret < 0)
7389                                 return ret;
7390                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7391                                 modify_after_mirror = 1;
7392                         /* PCP is counted together with the push_vlan action. */
7393                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7394                         break;
7395                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7396                         ret = flow_dv_validate_action_set_vlan_vid
7397                                                 (item_flags, action_flags,
7398                                                  actions, error);
7399                         if (ret < 0)
7400                                 return ret;
7401                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7402                                 modify_after_mirror = 1;
7403                         /* VID is counted together with the push_vlan action. */
7404                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7405                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7406                         break;
7407                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7408                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7409                         ret = flow_dv_validate_action_l2_encap(dev,
7410                                                                action_flags,
7411                                                                actions, attr,
7412                                                                error);
7413                         if (ret < 0)
7414                                 return ret;
7415                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7416                         ++actions_n;
7417                         break;
7418                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7419                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7420                         ret = flow_dv_validate_action_decap(dev, action_flags,
7421                                                             actions, item_flags,
7422                                                             attr, error);
7423                         if (ret < 0)
7424                                 return ret;
7425                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7426                                 modify_after_mirror = 1;
7427                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7428                         ++actions_n;
7429                         break;
7430                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7431                         ret = flow_dv_validate_action_raw_encap_decap
7432                                 (dev, NULL, actions->conf, attr, &action_flags,
7433                                  &actions_n, actions, item_flags, error);
7434                         if (ret < 0)
7435                                 return ret;
7436                         break;
7437                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7438                         decap = actions->conf;
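                        /*
                         * Peek past any VOID actions: a RAW_DECAP followed
                         * directly by RAW_ENCAP is validated below as one
                         * combined decap/encap pair.
                         */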
7439                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7440                                 ;
7441                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7442                                 encap = NULL;
7443                                 actions--;
7444                         } else {
7445                                 encap = actions->conf;
7446                         }
7447                         ret = flow_dv_validate_action_raw_encap_decap
7448                                            (dev,
7449                                             decap ? decap : &empty_decap, encap,
7450                                             attr, &action_flags, &actions_n,
7451                                             actions, item_flags, error);
7452                         if (ret < 0)
7453                                 return ret;
7454                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7455                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7456                                 modify_after_mirror = 1;
7457                         break;
7458                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7459                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7460                         ret = flow_dv_validate_action_modify_mac(action_flags,
7461                                                                  actions,
7462                                                                  item_flags,
7463                                                                  error);
7464                         if (ret < 0)
7465                                 return ret;
7466                         /* Count all modify-header actions as one action. */
7467                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7468                                 ++actions_n;
7469                         action_flags |= actions->type ==
7470                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7471                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7472                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7473                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7474                                 modify_after_mirror = 1;
7475                         /*
7476                          * Even though the source and destination MAC addresses
7477                          * overlap in the 4B-aligned header layout, the convert
7478                          * function handles them separately, so 4 SW actions are
7479                          * created. Two actions are added each time, regardless
7480                          * of how many address bytes are set.
7481                          */
7482                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7483                         break;
7484                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7485                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7486                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7487                                                                   actions,
7488                                                                   item_flags,
7489                                                                   error);
7490                         if (ret < 0)
7491                                 return ret;
7492                         /* Count all modify-header actions as one action. */
7493                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7494                                 ++actions_n;
7495                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7496                                 modify_after_mirror = 1;
7497                         action_flags |= actions->type ==
7498                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7499                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7500                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7501                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7502                         break;
7503                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7504                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7505                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7506                                                                   actions,
7507                                                                   item_flags,
7508                                                                   error);
7509                         if (ret < 0)
7510                                 return ret;
7511                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7512                                 return rte_flow_error_set(error, ENOTSUP,
7513                                         RTE_FLOW_ERROR_TYPE_ACTION,
7514                                         actions,
7515                                         "Can't change header "
7516                                         "with ICMPv6 proto");
7517                         /* Count all modify-header actions as one action. */
7518                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7519                                 ++actions_n;
7520                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7521                                 modify_after_mirror = 1;
7522                         action_flags |= actions->type ==
7523                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7524                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7525                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7526                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7527                         break;
7528                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7529                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7530                         ret = flow_dv_validate_action_modify_tp(action_flags,
7531                                                                 actions,
7532                                                                 item_flags,
7533                                                                 error);
7534                         if (ret < 0)
7535                                 return ret;
7536                         /* Count all modify-header actions as one action. */
7537                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7538                                 ++actions_n;
7539                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7540                                 modify_after_mirror = 1;
7541                         action_flags |= actions->type ==
7542                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7543                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7544                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7545                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7546                         break;
7547                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7548                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7549                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7550                                                                  actions,
7551                                                                  item_flags,
7552                                                                  error);
7553                         if (ret < 0)
7554                                 return ret;
7555                         /* Count all modify-header actions as one action. */
7556                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7557                                 ++actions_n;
7558                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7559                                 modify_after_mirror = 1;
7560                         action_flags |= actions->type ==
7561                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7562                                                 MLX5_FLOW_ACTION_SET_TTL :
7563                                                 MLX5_FLOW_ACTION_DEC_TTL;
7564                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7565                         break;
7566                 case RTE_FLOW_ACTION_TYPE_JUMP:
7567                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7568                                                            action_flags,
7569                                                            attr, external,
7570                                                            error);
7571                         if (ret)
7572                                 return ret;
7573                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7574                             fdb_mirror_limit)
7575                                 return rte_flow_error_set(error, EINVAL,
7576                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7577                                                   NULL,
7578                                                   "sample and jump action combination is not supported");
7579                         ++actions_n;
7580                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7581                         break;
7582                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7583                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7584                         ret = flow_dv_validate_action_modify_tcp_seq
7585                                                                 (action_flags,
7586                                                                  actions,
7587                                                                  item_flags,
7588                                                                  error);
7589                         if (ret < 0)
7590                                 return ret;
7591                         /* Count all modify-header actions as one action. */
7592                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7593                                 ++actions_n;
7594                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7595                                 modify_after_mirror = 1;
7596                         action_flags |= actions->type ==
7597                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7598                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7599                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7600                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7601                         break;
7602                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7603                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7604                         ret = flow_dv_validate_action_modify_tcp_ack
7605                                                                 (action_flags,
7606                                                                  actions,
7607                                                                  item_flags,
7608                                                                  error);
7609                         if (ret < 0)
7610                                 return ret;
7611                         /* Count all modify-header actions as one action. */
7612                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7613                                 ++actions_n;
7614                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7615                                 modify_after_mirror = 1;
7616                         action_flags |= actions->type ==
7617                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7618                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7619                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7620                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7621                         break;
7622                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7623                         break;
7624                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7625                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7626                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7627                         break;
7628                 case RTE_FLOW_ACTION_TYPE_METER:
7629                         ret = mlx5_flow_validate_action_meter(dev,
7630                                                               action_flags,
7631                                                               actions, attr,
7632                                                               port_id_item,
7633                                                               &def_policy,
7634                                                               error);
7635                         if (ret < 0)
7636                                 return ret;
7637                         action_flags |= MLX5_FLOW_ACTION_METER;
7638                         if (!def_policy)
7639                                 action_flags |=
7640                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7641                         ++actions_n;
7642                         /* Meter action will add one more TAG action. */
7643                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7644                         break;
7645                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7646                         if (!attr->transfer && !attr->group)
7647                                 return rte_flow_error_set(error, ENOTSUP,
7648                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7649                                                 NULL, "Shared ASO age action "
7650                                                 "is not supported for group 0");
7651                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7652                                 return rte_flow_error_set
7653                                                   (error, EINVAL,
7654                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7655                                                    NULL,
7656                                                    "duplicate age actions set");
7657                         action_flags |= MLX5_FLOW_ACTION_AGE;
7658                         ++actions_n;
7659                         break;
7660                 case RTE_FLOW_ACTION_TYPE_AGE:
7661                         ret = flow_dv_validate_action_age(action_flags,
7662                                                           actions, dev,
7663                                                           error);
7664                         if (ret < 0)
7665                                 return ret;
7666                         /*
7667                          * Validate that the regular AGE action (using a counter)
7668                          * is mutually exclusive with shared counter actions.
7669                          */
7670                         if (!priv->sh->flow_hit_aso_en) {
7671                                 if (shared_count)
7672                                         return rte_flow_error_set
7673                                                 (error, EINVAL,
7674                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7675                                                 NULL,
7676                                                 "old age and shared count combination is not supported");
7677                                 if (sample_count)
7678                                         return rte_flow_error_set
7679                                                 (error, EINVAL,
7680                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7681                                                 NULL,
7682                                                 "old age action and count must be in the same sub flow");
7683                         }
7684                         action_flags |= MLX5_FLOW_ACTION_AGE;
7685                         ++actions_n;
7686                         break;
7687                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7688                         ret = flow_dv_validate_action_modify_ipv4_dscp
7689                                                          (action_flags,
7690                                                           actions,
7691                                                           item_flags,
7692                                                           error);
7693                         if (ret < 0)
7694                                 return ret;
7695                         /* Count all modify-header actions as one action. */
7696                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7697                                 ++actions_n;
7698                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7699                                 modify_after_mirror = 1;
7700                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7701                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7702                         break;
7703                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7704                         ret = flow_dv_validate_action_modify_ipv6_dscp
7705                                                                 (action_flags,
7706                                                                  actions,
7707                                                                  item_flags,
7708                                                                  error);
7709                         if (ret < 0)
7710                                 return ret;
7711                         /* Count all modify-header actions as one action. */
7712                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7713                                 ++actions_n;
7714                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7715                                 modify_after_mirror = 1;
7716                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7717                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7718                         break;
7719                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7720                         ret = flow_dv_validate_action_sample(&action_flags,
7721                                                              actions, dev,
7722                                                              attr, item_flags,
7723                                                              rss, &sample_rss,
7724                                                              &sample_count,
7725                                                              &fdb_mirror_limit,
7726                                                              error);
7727                         if (ret < 0)
7728                                 return ret;
7729                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7730                         ++actions_n;
7731                         break;
7732                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7733                         ret = flow_dv_validate_action_modify_field(dev,
7734                                                                    action_flags,
7735                                                                    actions,
7736                                                                    attr,
7737                                                                    error);
7738                         if (ret < 0)
7739                                 return ret;
7740                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7741                                 modify_after_mirror = 1;
7742                         /* Count all modify-header actions as one action. */
7743                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7744                                 ++actions_n;
7745                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7746                         rw_act_num += ret;
7747                         break;
7748                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7749                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7750                                                              item_flags, attr,
7751                                                              error);
7752                         if (ret < 0)
7753                                 return ret;
7754                         action_flags |= MLX5_FLOW_ACTION_CT;
7755                         break;
7756                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7757                         /* The tunnel offload action was processed earlier;
7758                          * it is listed here as a supported type.
7759                          */
7760                         break;
7761                 default:
7762                         return rte_flow_error_set(error, ENOTSUP,
7763                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7764                                                   actions,
7765                                                   "action not supported");
7766                 }
7767         }
7768         /*
7769          * Validate actions in flow rules:
7770          * - Explicit decap action is prohibited by the tunnel offload API.
7771          * - Drop action in tunnel steer rule is prohibited by the API.
7772          * - Application cannot use MARK action because its value can mask
7773          *   the tunnel default miss notification.
7774          * - JUMP in tunnel match rule has no support in the current PMD
7775          *   implementation.
7776          * - TAG & META are reserved for future use.
7777          */
7778         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7779                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7780                                             MLX5_FLOW_ACTION_MARK     |
7781                                             MLX5_FLOW_ACTION_SET_TAG  |
7782                                             MLX5_FLOW_ACTION_SET_META |
7783                                             MLX5_FLOW_ACTION_DROP;
7784
7785                 if (action_flags & bad_actions_mask)
7786                         return rte_flow_error_set
7787                                         (error, EINVAL,
7788                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7789                                         "Invalid RTE action in tunnel "
7790                                         "set decap rule");
7791                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7792                         return rte_flow_error_set
7793                                         (error, EINVAL,
7794                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7795                                         "tunnel set decap rule must terminate "
7796                                         "with JUMP");
7797                 if (!attr->ingress)
7798                         return rte_flow_error_set
7799                                         (error, EINVAL,
7800                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7801                                         "tunnel flows for ingress traffic only");
7802         }
7803         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7804                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7805                                             MLX5_FLOW_ACTION_MARK    |
7806                                             MLX5_FLOW_ACTION_SET_TAG |
7807                                             MLX5_FLOW_ACTION_SET_META;
7808
7809                 if (action_flags & bad_actions_mask)
7810                         return rte_flow_error_set
7811                                         (error, EINVAL,
7812                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7813                                         "Invalid RTE action in tunnel "
7814                                         "set match rule");
7815         }
7816         /*
7817          * Validate the drop action's mutual exclusion with other actions.
7818          * The drop action is mutually exclusive with any other action,
7819          * except for the count action.
7820          * Drop action compatibility with tunnel offload was already validated.
7821          */
7822         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7823                             MLX5_FLOW_ACTION_TUNNEL_SET));
7824         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7825             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7826                 return rte_flow_error_set(error, EINVAL,
7827                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7828                                           "Drop action is mutually-exclusive "
7829                                           "with any other action, except for "
7830                                           "Count action");
7831         /* Eswitch has a few restrictions on using items and actions. */
7832         if (attr->transfer) {
7833                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7834                     action_flags & MLX5_FLOW_ACTION_FLAG)
7835                         return rte_flow_error_set(error, ENOTSUP,
7836                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7837                                                   NULL,
7838                                                   "unsupported action FLAG");
7839                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7840                     action_flags & MLX5_FLOW_ACTION_MARK)
7841                         return rte_flow_error_set(error, ENOTSUP,
7842                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7843                                                   NULL,
7844                                                   "unsupported action MARK");
7845                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7846                         return rte_flow_error_set(error, ENOTSUP,
7847                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7848                                                   NULL,
7849                                                   "unsupported action QUEUE");
7850                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7851                         return rte_flow_error_set(error, ENOTSUP,
7852                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7853                                                   NULL,
7854                                                   "unsupported action RSS");
7855                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7856                         return rte_flow_error_set(error, EINVAL,
7857                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7858                                                   actions,
7859                                                   "no fate action is found");
7860         } else {
7861                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7862                         return rte_flow_error_set(error, EINVAL,
7863                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7864                                                   actions,
7865                                                   "no fate action is found");
7866         }
7867         /*
7868          * Continue validation for Xcap and VLAN actions.
7869          * If hairpin is working in explicit TX rule mode, there is no action
7870          * splitting, and the validation of a hairpin ingress flow should be
7871          * the same as for other standard flows.
7872          */
7873         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7874                              MLX5_FLOW_VLAN_ACTIONS)) &&
7875             (queue_index == 0xFFFF ||
7876              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7877              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7878              conf->tx_explicit != 0))) {
7879                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7880                     MLX5_FLOW_XCAP_ACTIONS)
7881                         return rte_flow_error_set(error, ENOTSUP,
7882                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7883                                                   NULL, "encap and decap "
7884                                                   "combination is not supported");
7885                 if (!attr->transfer && attr->ingress) {
7886                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7887                                 return rte_flow_error_set
7888                                                 (error, ENOTSUP,
7889                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7890                                                  NULL, "encap is not supported"
7891                                                  " for ingress traffic");
7892                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7893                                 return rte_flow_error_set
7894                                                 (error, ENOTSUP,
7895                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7896                                                  NULL, "push VLAN action not "
7897                                                  "supported for ingress");
7898                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7899                                         MLX5_FLOW_VLAN_ACTIONS)
7900                                 return rte_flow_error_set
7901                                                 (error, ENOTSUP,
7902                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7903                                                  NULL, "no support for "
7904                                                  "multiple VLAN actions");
7905                 }
7906         }
7907         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7908                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7909                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7910                         attr->ingress)
7911                         return rte_flow_error_set
7912                                 (error, ENOTSUP,
7913                                 RTE_FLOW_ERROR_TYPE_ACTION,
7914                                 NULL, "fate action not supported for "
7915                                 "meter with policy");
7916                 if (attr->egress) {
7917                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7918                                 return rte_flow_error_set
7919                                         (error, ENOTSUP,
7920                                         RTE_FLOW_ERROR_TYPE_ACTION,
7921                                         NULL, "modify header action in egress "
7922                                         "cannot be done before meter action");
7923                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7924                                 return rte_flow_error_set
7925                                         (error, ENOTSUP,
7926                                         RTE_FLOW_ERROR_TYPE_ACTION,
7927                                         NULL, "encap action in egress "
7928                                         "cannot be done before meter action");
7929                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7930                                 return rte_flow_error_set
7931                                         (error, ENOTSUP,
7932                                         RTE_FLOW_ERROR_TYPE_ACTION,
7933                                         NULL, "push vlan action in egress "
7934                                         "cannot be done before meter action");
7935                 }
7936         }
7937         /*
7938          * Hairpin flow will add one more TAG action in TX implicit mode.
7939          * In TX explicit mode, there will be no hairpin flow ID.
7940          */
7941         if (hairpin > 0)
7942                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7943         /* Extra metadata enabled: one more TAG action will be added. */
7944         if (dev_conf->dv_flow_en &&
7945             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7946             mlx5_flow_ext_mreg_supported(dev))
7947                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7948         if (rw_act_num >
7949                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7950                 return rte_flow_error_set(error, ENOTSUP,
7951                                           RTE_FLOW_ERROR_TYPE_ACTION,
7952                                           NULL, "too many header modify"
7953                                           " actions to support");
7954         }
7955         /* Eswitch egress mirror and modify flow has a limitation on CX5. */
7956         if (fdb_mirror_limit && modify_after_mirror)
7957                 return rte_flow_error_set(error, EINVAL,
7958                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7959                                 "sample before modify action is not supported");
7960         return 0;
7961 }
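/*
 * A minimal sketch (not part of the driver) restating the drop-action
 * exclusion rule validated above: apart from COUNT, no other action may
 * accompany DROP (tunnel offload rules are exempted earlier). The helper
 * name is hypothetical, for illustration only.
 */
static bool
flow_dv_drop_combination_ok(uint64_t action_flags)
{
        if (!(action_flags & MLX5_FLOW_ACTION_DROP))
                return true;
        /* Only DROP itself and COUNT may remain in the flags. */
        return !(action_flags &
                 ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT));
}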
7962
7963 /**
7964  * Internal preparation function. Allocates the DV flow structure;
7965  * its size is constant.
7966  *
7967  * @param[in] dev
7968  *   Pointer to the rte_eth_dev structure.
7969  * @param[in] attr
7970  *   Pointer to the flow attributes.
7971  * @param[in] items
7972  *   Pointer to the list of items.
7973  * @param[in] actions
7974  *   Pointer to the list of actions.
7975  * @param[out] error
7976  *   Pointer to the error structure.
7977  *
7978  * @return
7979  *   Pointer to mlx5_flow object on success,
7980  *   otherwise NULL and rte_errno is set.
7981  */
7982 static struct mlx5_flow *
7983 flow_dv_prepare(struct rte_eth_dev *dev,
7984                 const struct rte_flow_attr *attr __rte_unused,
7985                 const struct rte_flow_item items[] __rte_unused,
7986                 const struct rte_flow_action actions[] __rte_unused,
7987                 struct rte_flow_error *error)
7988 {
7989         uint32_t handle_idx = 0;
7990         struct mlx5_flow *dev_flow;
7991         struct mlx5_flow_handle *dev_handle;
7992         struct mlx5_priv *priv = dev->data->dev_private;
7993         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7994
7995         MLX5_ASSERT(wks);
7996         wks->skip_matcher_reg = 0;
7997         wks->policy = NULL;
7998         wks->final_policy = NULL;
7999         /* Guard against overrunning the temporary flow array. */
8000         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8001                 rte_flow_error_set(error, ENOSPC,
8002                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8003                                    "no free temporary device flow");
8004                 return NULL;
8005         }
8006         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8007                                    &handle_idx);
8008         if (!dev_handle) {
8009                 rte_flow_error_set(error, ENOMEM,
8010                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8011                                    "not enough memory to create flow handle");
8012                 return NULL;
8013         }
8014         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8015         dev_flow = &wks->flows[wks->flow_idx++];
8016         memset(dev_flow, 0, sizeof(*dev_flow));
8017         dev_flow->handle = dev_handle;
8018         dev_flow->handle_idx = handle_idx;
8019         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8020         dev_flow->ingress = attr->ingress;
8021         dev_flow->dv.transfer = attr->transfer;
8022         return dev_flow;
8023 }
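/*
 * A minimal sketch (not part of the driver) of the temporary device-flow
 * budget that flow_dv_prepare() enforces above: each per-thread workspace
 * holds at most MLX5_NUM_MAX_DEV_FLOWS device flows, so the prepare stage
 * fails with ENOSPC once the index reaches that bound. The helper name is
 * hypothetical, for illustration only.
 */
static bool
flow_dv_prepare_would_fit(const struct mlx5_flow_workspace *wks)
{
        return wks->flow_idx < MLX5_NUM_MAX_DEV_FLOWS;
}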
8024
8025 #ifdef RTE_LIBRTE_MLX5_DEBUG
8026 /**
8027  * Sanity check for match mask and value. Similar to check_valid_spec() in
8028  * the kernel driver. If an unmasked bit is present in the value, it returns failure.
8029  *
8030  * @param match_mask
8031  *   pointer to match mask buffer.
8032  * @param match_value
8033  *   pointer to match value buffer.
8034  *
8035  * @return
8036  *   0 if valid, -EINVAL otherwise.
8037  */
8038 static int
8039 flow_dv_check_valid_spec(void *match_mask, void *match_value)
8040 {
8041         uint8_t *m = match_mask;
8042         uint8_t *v = match_value;
8043         unsigned int i;
8044
8045         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8046                 if (v[i] & ~m[i]) {
8047                         DRV_LOG(ERR,
8048                                 "match_value differs from match_criteria"
8049                                 " %p[%u] != %p[%u]",
8050                                 match_value, i, match_mask, i);
8051                         return -EINVAL;
8052                 }
8053         }
8054         return 0;
8055 }
8056 #endif
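#ifdef RTE_LIBRTE_MLX5_DEBUG
/*
 * A hedged usage sketch for flow_dv_check_valid_spec() above: every bit
 * set in the match value must also be set in the match mask. The buffer
 * contents and the function name are made-up example values.
 */
static void
flow_dv_check_valid_spec_example(void)
{
        uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
        uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };

        mask[0] = 0xf0;
        value[0] = 0x30; /* Subset of the mask: accepted. */
        MLX5_ASSERT(flow_dv_check_valid_spec(mask, value) == 0);
        value[0] = 0x0f; /* Bits outside the mask: rejected. */
        MLX5_ASSERT(flow_dv_check_valid_spec(mask, value) == -EINVAL);
}
#endif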
8057
8058 /**
8059  * Add a match on the ip_version field.
8060  *
8061  * @param[in] group
8062  *   Flow group.
8063  * @param[in] headers_v
8064  *   Values header pointer.
8065  * @param[in] headers_m
8066  *   Masks header pointer.
8067  * @param[in] ip_version
8068  *   The IP version to set.
8069  */
8070 static inline void
8071 flow_dv_set_match_ip_version(uint32_t group,
8072                              void *headers_v,
8073                              void *headers_m,
8074                              uint8_t ip_version)
8075 {
8076         if (group == 0)
8077                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8078         else
8079                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8080                          ip_version);
8081         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8082         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8083         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8084 }
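/*
 * A minimal usage sketch (hypothetical helper, for illustration only):
 * applying flow_dv_set_match_ip_version() to the outer header block of a
 * matcher/key pair, as the Ethernet translator below does. On the root
 * table (group 0) the mask is widened to 0xf; elsewhere the exact version
 * is matched.
 */
static void
flow_dv_ip_version_usage(void *matcher, void *key, uint32_t group)
{
        void *hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
        void *hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

        /* Match IPv4; the ethertype fields are cleared by the callee. */
        flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
}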
8085
8086 /**
8087  * Add Ethernet item to matcher and to the value.
8088  *
8089  * @param[in, out] matcher
8090  *   Flow matcher.
8091  * @param[in, out] key
8092  *   Flow matcher value.
8093  * @param[in] item
8094  *   Flow pattern to translate.
8095  * @param[in] inner
8096  *   Item is inner pattern.
8097  */
8098 static void
8099 flow_dv_translate_item_eth(void *matcher, void *key,
8100                            const struct rte_flow_item *item, int inner,
8101                            uint32_t group)
8102 {
8103         const struct rte_flow_item_eth *eth_m = item->mask;
8104         const struct rte_flow_item_eth *eth_v = item->spec;
8105         const struct rte_flow_item_eth nic_mask = {
8106                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8107                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8108                 .type = RTE_BE16(0xffff),
8109                 .has_vlan = 0,
8110         };
8111         void *hdrs_m;
8112         void *hdrs_v;
8113         char *l24_v;
8114         unsigned int i;
8115
8116         if (!eth_v)
8117                 return;
8118         if (!eth_m)
8119                 eth_m = &nic_mask;
8120         if (inner) {
8121                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8122                                          inner_headers);
8123                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8124         } else {
8125                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8126                                          outer_headers);
8127                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8128         }
8129         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8130                &eth_m->dst, sizeof(eth_m->dst));
8131         /* The value must be in the range of the mask. */
8132         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8133         for (i = 0; i < sizeof(eth_m->dst); ++i)
8134                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8135         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8136                &eth_m->src, sizeof(eth_m->src));
8137         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8138         /* The value must be in the range of the mask. */
8139         for (i = 0; i < sizeof(eth_m->dst); ++i)
8140                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8141         /*
8142          * HW supports match on one Ethertype, the Ethertype following the last
8143          * VLAN tag of the packet (see PRM).
8144          * Set a match on ethertype only if the ETH header is not followed
8145          * by a VLAN. HW is optimized for IPv4/IPv6; in such cases, avoid
8146          * setting ethertype and use the ip_version field instead.
8147          * eCPRI over Ether layer will use type value 0xAEFE.
8148          */
8149         if (eth_m->type == 0xFFFF) {
8150                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8151                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8152                 switch (eth_v->type) {
8153                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8154                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8155                         return;
8156                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8157                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8158                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8159                         return;
8160                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8161                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8162                         return;
8163                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8164                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8165                         return;
8166                 default:
8167                         break;
8168                 }
8169         }
8170         if (eth_m->has_vlan) {
8171                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8172                 if (eth_v->has_vlan) {
8173                         /*
8174                          * Here, when also has_more_vlan field in VLAN item is
8175                          * not set, only single-tagged packets will be matched.
8176                          */
8177                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8178                         return;
8179                 }
8180         }
8181         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8182                  rte_be_to_cpu_16(eth_m->type));
8183         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8184         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8185 }
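
/*
 * Illustrative sketch (not part of the driver): the "value must be in the
 * range of the mask" rule applied above just ANDs each spec byte with the
 * corresponding mask byte before writing it into the flow key. A
 * hypothetical helper mirroring the per-byte loops:
 *
 *     static void masked_copy(uint8_t *dst, const uint8_t *val,
 *                             const uint8_t *msk, size_t n)
 *     {
 *         for (size_t i = 0; i < n; i++)
 *             dst[i] = val[i] & msk[i];
 *     }
 */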
8186
8187 /**
8188  * Add VLAN item to matcher and to the value.
8189  *
8190  * @param[in, out] dev_flow
8191  *   Flow descriptor.
8192  * @param[in, out] matcher
8193  *   Flow matcher.
8194  * @param[in, out] key
8195  *   Flow matcher value.
8196  * @param[in] item
8197  *   Flow pattern to translate.
8198  * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
8201 static void
8202 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8203                             void *matcher, void *key,
8204                             const struct rte_flow_item *item,
8205                             int inner, uint32_t group)
8206 {
8207         const struct rte_flow_item_vlan *vlan_m = item->mask;
8208         const struct rte_flow_item_vlan *vlan_v = item->spec;
8209         void *hdrs_m;
8210         void *hdrs_v;
8211         uint16_t tci_m;
8212         uint16_t tci_v;
8213
8214         if (inner) {
8215                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8216                                          inner_headers);
8217                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8218         } else {
8219                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8220                                          outer_headers);
8221                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
                /*
                 * This is a workaround. Masks are not supported here
                 * and have been pre-validated.
                 */
8226                 if (vlan_v)
8227                         dev_flow->handle->vf_vlan.tag =
8228                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8229         }
8230         /*
8231          * When VLAN item exists in flow, mark packet as tagged,
8232          * even if TCI is not specified.
8233          */
8234         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8235                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8236                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8237         }
8238         if (!vlan_v)
8239                 return;
8240         if (!vlan_m)
8241                 vlan_m = &rte_flow_item_vlan_mask;
8242         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8243         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8244         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8245         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8246         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8247         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8248         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8249         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8250         /*
8251          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8252          * ethertype, and use ip_version field instead.
8253          */
8254         if (vlan_m->inner_type == 0xFFFF) {
8255                 switch (vlan_v->inner_type) {
8256                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8257                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8258                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8259                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8260                         return;
8261                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8262                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8263                         return;
8264                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8265                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8266                         return;
8267                 default:
8268                         break;
8269                 }
8270         }
8271         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8272                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8273                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8274                 /* Only one vlan_tag bit can be set. */
8275                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8276                 return;
8277         }
8278         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8279                  rte_be_to_cpu_16(vlan_m->inner_type));
8280         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8281                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8282 }
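
/*
 * Illustrative sketch (not part of the driver): how the host-order TCI is
 * split into the first_prio/first_cfi/first_vid fields above. MLX5_SET()
 * truncates each value to its field width, so plain shifts suffice:
 *
 *     uint16_t tci  = 0xe00a;             // PCP 7, DEI 0, VID 0x00a
 *     uint16_t vid  = tci & 0x0fff;       // -> first_vid  (12 bits)
 *     uint16_t cfi  = (tci >> 12) & 0x1;  // -> first_cfi  (1 bit)
 *     uint16_t prio = tci >> 13;          // -> first_prio (3 bits)
 */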
8283
8284 /**
8285  * Add IPV4 item to matcher and to the value.
8286  *
8287  * @param[in, out] matcher
8288  *   Flow matcher.
8289  * @param[in, out] key
8290  *   Flow matcher value.
8291  * @param[in] item
8292  *   Flow pattern to translate.
8293  * @param[in] inner
8294  *   Item is inner pattern.
8295  * @param[in] group
8296  *   The group to insert the rule.
8297  */
8298 static void
8299 flow_dv_translate_item_ipv4(void *matcher, void *key,
8300                             const struct rte_flow_item *item,
8301                             int inner, uint32_t group)
8302 {
8303         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8304         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8305         const struct rte_flow_item_ipv4 nic_mask = {
8306                 .hdr = {
8307                         .src_addr = RTE_BE32(0xffffffff),
8308                         .dst_addr = RTE_BE32(0xffffffff),
8309                         .type_of_service = 0xff,
8310                         .next_proto_id = 0xff,
8311                         .time_to_live = 0xff,
8312                 },
8313         };
8314         void *headers_m;
8315         void *headers_v;
8316         char *l24_m;
8317         char *l24_v;
8318         uint8_t tos;
8319
8320         if (inner) {
8321                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8322                                          inner_headers);
8323                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8324         } else {
8325                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8326                                          outer_headers);
8327                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8328         }
8329         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8330         if (!ipv4_v)
8331                 return;
8332         if (!ipv4_m)
8333                 ipv4_m = &nic_mask;
8334         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8335                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8336         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8337                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8338         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8339         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8340         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8341                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8342         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8343                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8344         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8345         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8346         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8347         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8348                  ipv4_m->hdr.type_of_service);
8349         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8350         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8351                  ipv4_m->hdr.type_of_service >> 2);
8352         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8353         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8354                  ipv4_m->hdr.next_proto_id);
8355         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8356                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8357         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8358                  ipv4_m->hdr.time_to_live);
8359         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8360                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8361         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8362                  !!(ipv4_m->hdr.fragment_offset));
8363         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8364                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8365 }
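
/*
 * Illustrative sketch (not part of the driver): the TOS octet is written
 * twice above; since MLX5_SET() masks to field width, ip_ecn keeps the low
 * two bits and ip_dscp the upper six:
 *
 *     uint8_t tos  = (46 << 2) | 1;  // DSCP 46 (EF), ECN 1 -> 0xb9
 *     uint8_t ecn  = tos & 0x3;      // 1  -> ip_ecn  (2-bit field)
 *     uint8_t dscp = tos >> 2;       // 46 -> ip_dscp (6-bit field)
 */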
8366
8367 /**
8368  * Add IPV6 item to matcher and to the value.
8369  *
8370  * @param[in, out] matcher
8371  *   Flow matcher.
8372  * @param[in, out] key
8373  *   Flow matcher value.
8374  * @param[in] item
8375  *   Flow pattern to translate.
8376  * @param[in] inner
8377  *   Item is inner pattern.
8378  * @param[in] group
8379  *   The group to insert the rule.
8380  */
8381 static void
8382 flow_dv_translate_item_ipv6(void *matcher, void *key,
8383                             const struct rte_flow_item *item,
8384                             int inner, uint32_t group)
8385 {
8386         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8387         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8388         const struct rte_flow_item_ipv6 nic_mask = {
8389                 .hdr = {
8390                         .src_addr =
8391                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8392                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8393                         .dst_addr =
8394                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8395                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8396                         .vtc_flow = RTE_BE32(0xffffffff),
8397                         .proto = 0xff,
8398                         .hop_limits = 0xff,
8399                 },
8400         };
8401         void *headers_m;
8402         void *headers_v;
8403         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8404         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8405         char *l24_m;
8406         char *l24_v;
8407         uint32_t vtc_m;
8408         uint32_t vtc_v;
8409         int i;
8410         int size;
8411
8412         if (inner) {
8413                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8414                                          inner_headers);
8415                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8416         } else {
8417                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8418                                          outer_headers);
8419                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8420         }
8421         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8422         if (!ipv6_v)
8423                 return;
8424         if (!ipv6_m)
8425                 ipv6_m = &nic_mask;
8426         size = sizeof(ipv6_m->hdr.dst_addr);
8427         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8428                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8429         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8430                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8431         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8432         for (i = 0; i < size; ++i)
8433                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8434         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8435                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8436         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8437                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8438         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8439         for (i = 0; i < size; ++i)
8440                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8441         /* TOS. */
8442         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8443         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8444         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8445         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8446         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8447         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8448         /* Label. */
8449         if (inner) {
8450                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8451                          vtc_m);
8452                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8453                          vtc_v);
8454         } else {
8455                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8456                          vtc_m);
8457                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8458                          vtc_v);
8459         }
8460         /* Protocol. */
8461         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8462                  ipv6_m->hdr.proto);
8463         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8464                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8465         /* Hop limit. */
8466         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8467                  ipv6_m->hdr.hop_limits);
8468         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8469                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8470         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8471                  !!(ipv6_m->has_frag_ext));
8472         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8473                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8474 }
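
/*
 * Illustrative sketch (not part of the driver): layout of the IPv6 vtc_flow
 * word driving the shifts above, Version(4) | Traffic Class(8) |
 * Flow Label(20), where TC = DSCP(6) | ECN(2):
 *
 *     uint32_t vtc   = (6u << 28) | (0x12u << 20) | 0xabcde;
 *     uint32_t ecn   = (vtc >> 20) & 0x3;   // 2       -> ip_ecn
 *     uint32_t dscp  = (vtc >> 22) & 0x3f;  // 4       -> ip_dscp
 *     uint32_t label = vtc & 0xfffff;       // 0xabcde -> *_ipv6_flow_label
 */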
8475
8476 /**
8477  * Add IPV6 fragment extension item to matcher and to the value.
8478  *
8479  * @param[in, out] matcher
8480  *   Flow matcher.
8481  * @param[in, out] key
8482  *   Flow matcher value.
8483  * @param[in] item
8484  *   Flow pattern to translate.
8485  * @param[in] inner
8486  *   Item is inner pattern.
8487  */
8488 static void
8489 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8490                                      const struct rte_flow_item *item,
8491                                      int inner)
8492 {
8493         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8494         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8495         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8496                 .hdr = {
8497                         .next_header = 0xff,
8498                         .frag_data = RTE_BE16(0xffff),
8499                 },
8500         };
8501         void *headers_m;
8502         void *headers_v;
8503
8504         if (inner) {
8505                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8506                                          inner_headers);
8507                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8508         } else {
8509                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8510                                          outer_headers);
8511                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8512         }
8513         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8514         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8515         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8516         if (!ipv6_frag_ext_v)
8517                 return;
8518         if (!ipv6_frag_ext_m)
8519                 ipv6_frag_ext_m = &nic_mask;
8520         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8521                  ipv6_frag_ext_m->hdr.next_header);
8522         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8523                  ipv6_frag_ext_v->hdr.next_header &
8524                  ipv6_frag_ext_m->hdr.next_header);
8525 }
8526
8527 /**
8528  * Add TCP item to matcher and to the value.
8529  *
8530  * @param[in, out] matcher
8531  *   Flow matcher.
8532  * @param[in, out] key
8533  *   Flow matcher value.
8534  * @param[in] item
8535  *   Flow pattern to translate.
8536  * @param[in] inner
8537  *   Item is inner pattern.
8538  */
8539 static void
8540 flow_dv_translate_item_tcp(void *matcher, void *key,
8541                            const struct rte_flow_item *item,
8542                            int inner)
8543 {
8544         const struct rte_flow_item_tcp *tcp_m = item->mask;
8545         const struct rte_flow_item_tcp *tcp_v = item->spec;
8546         void *headers_m;
8547         void *headers_v;
8548
8549         if (inner) {
8550                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8551                                          inner_headers);
8552                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8553         } else {
8554                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8555                                          outer_headers);
8556                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8557         }
8558         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8559         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8560         if (!tcp_v)
8561                 return;
8562         if (!tcp_m)
8563                 tcp_m = &rte_flow_item_tcp_mask;
8564         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8565                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8566         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8567                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8568         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8569                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8570         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8571                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8572         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8573                  tcp_m->hdr.tcp_flags);
8574         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8575                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8576 }
8577
8578 /**
8579  * Add UDP item to matcher and to the value.
8580  *
8581  * @param[in, out] matcher
8582  *   Flow matcher.
8583  * @param[in, out] key
8584  *   Flow matcher value.
8585  * @param[in] item
8586  *   Flow pattern to translate.
8587  * @param[in] inner
8588  *   Item is inner pattern.
8589  */
8590 static void
8591 flow_dv_translate_item_udp(void *matcher, void *key,
8592                            const struct rte_flow_item *item,
8593                            int inner)
8594 {
8595         const struct rte_flow_item_udp *udp_m = item->mask;
8596         const struct rte_flow_item_udp *udp_v = item->spec;
8597         void *headers_m;
8598         void *headers_v;
8599
8600         if (inner) {
8601                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8602                                          inner_headers);
8603                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8604         } else {
8605                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8606                                          outer_headers);
8607                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8608         }
8609         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8610         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8611         if (!udp_v)
8612                 return;
8613         if (!udp_m)
8614                 udp_m = &rte_flow_item_udp_mask;
8615         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8616                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8617         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8618                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8619         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8620                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8621         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8622                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8623 }
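
/*
 * Illustrative sketch (not part of the driver): L4 ports are masked while
 * still in big-endian form and byte-swapped once, since MLX5_SET() expects
 * host-order values:
 *
 *     rte_be16_t v = RTE_BE16(4789), m = RTE_BE16(0xffff);
 *     uint16_t dport = rte_be_to_cpu_16(v & m);  // 4789 -> udp_dport
 */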
8624
8625 /**
 * Add GRE optional key item to matcher and to the value.
8627  *
8628  * @param[in, out] matcher
8629  *   Flow matcher.
8630  * @param[in, out] key
8631  *   Flow matcher value.
8632  * @param[in] item
8633  *   Flow pattern to translate.
8636  */
8637 static void
flow_dv_translate_item_gre_key(void *matcher, void *key,
                               const struct rte_flow_item *item)
8640 {
8641         const rte_be32_t *key_m = item->mask;
8642         const rte_be32_t *key_v = item->spec;
8643         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8644         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8645         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8646
        /* GRE K bit must be on and should already be validated. */
8648         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8649         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8650         if (!key_v)
8651                 return;
8652         if (!key_m)
8653                 key_m = &gre_key_default_mask;
8654         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8655                  rte_be_to_cpu_32(*key_m) >> 8);
8656         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8657                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8658         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8659                  rte_be_to_cpu_32(*key_m) & 0xFF);
8660         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8661                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8662 }
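
/*
 * Illustrative sketch (not part of the driver): the 32-bit GRE key is split
 * across two PRM fields, the high 24 bits and the low 8 bits:
 *
 *     uint32_t k    = rte_be_to_cpu_32(*key_v & *key_m);
 *     uint32_t hi24 = k >> 8;    // -> gre_key_h
 *     uint32_t lo8  = k & 0xff;  // -> gre_key_l
 */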
8663
8664 /**
8665  * Add GRE item to matcher and to the value.
8666  *
8667  * @param[in, out] matcher
8668  *   Flow matcher.
8669  * @param[in, out] key
8670  *   Flow matcher value.
8671  * @param[in] item
8672  *   Flow pattern to translate.
8673  * @param[in] inner
8674  *   Item is inner pattern.
8675  */
8676 static void
8677 flow_dv_translate_item_gre(void *matcher, void *key,
8678                            const struct rte_flow_item *item,
8679                            int inner)
8680 {
8681         const struct rte_flow_item_gre *gre_m = item->mask;
8682         const struct rte_flow_item_gre *gre_v = item->spec;
8683         void *headers_m;
8684         void *headers_v;
8685         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8686         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8687         struct {
8688                 union {
8689                         __extension__
8690                         struct {
8691                                 uint16_t version:3;
8692                                 uint16_t rsvd0:9;
8693                                 uint16_t s_present:1;
8694                                 uint16_t k_present:1;
8695                                 uint16_t rsvd_bit1:1;
8696                                 uint16_t c_present:1;
8697                         };
8698                         uint16_t value;
8699                 };
8700         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8701
8702         if (inner) {
8703                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8704                                          inner_headers);
8705                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8706         } else {
8707                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8708                                          outer_headers);
8709                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8710         }
8711         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8712         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8713         if (!gre_v)
8714                 return;
8715         if (!gre_m)
8716                 gre_m = &rte_flow_item_gre_mask;
8717         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8718                  rte_be_to_cpu_16(gre_m->protocol));
8719         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8720                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8721         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8722         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8723         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8724                  gre_crks_rsvd0_ver_m.c_present);
8725         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8726                  gre_crks_rsvd0_ver_v.c_present &
8727                  gre_crks_rsvd0_ver_m.c_present);
8728         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8729                  gre_crks_rsvd0_ver_m.k_present);
8730         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8731                  gre_crks_rsvd0_ver_v.k_present &
8732                  gre_crks_rsvd0_ver_m.k_present);
8733         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8734                  gre_crks_rsvd0_ver_m.s_present);
8735         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8736                  gre_crks_rsvd0_ver_v.s_present &
8737                  gre_crks_rsvd0_ver_m.s_present);
8738 }
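
/*
 * Illustrative sketch (not part of the driver): on a little-endian host the
 * anonymous bitfield above maps the byte-swapped c_rsvd0_ver word so the
 * GRE flag bits land where the wire format defines them:
 *
 *     uint16_t w = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
 *     int c   = (w >> 15) & 1;  // C (checksum present)
 *     int k   = (w >> 13) & 1;  // K (key present)
 *     int s   = (w >> 12) & 1;  // S (sequence present)
 *     int ver = w & 0x7;        // version
 */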
8739
8740 /**
8741  * Add NVGRE item to matcher and to the value.
8742  *
8743  * @param[in, out] matcher
8744  *   Flow matcher.
8745  * @param[in, out] key
8746  *   Flow matcher value.
8747  * @param[in] item
8748  *   Flow pattern to translate.
8749  * @param[in] inner
8750  *   Item is inner pattern.
8751  */
8752 static void
8753 flow_dv_translate_item_nvgre(void *matcher, void *key,
8754                              const struct rte_flow_item *item,
8755                              int inner)
8756 {
8757         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8758         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8759         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8760         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8761         const char *tni_flow_id_m;
8762         const char *tni_flow_id_v;
8763         char *gre_key_m;
8764         char *gre_key_v;
8765         int size;
8766         int i;
8767
8768         /* For NVGRE, GRE header fields must be set with defined values. */
8769         const struct rte_flow_item_gre gre_spec = {
8770                 .c_rsvd0_ver = RTE_BE16(0x2000),
8771                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8772         };
8773         const struct rte_flow_item_gre gre_mask = {
8774                 .c_rsvd0_ver = RTE_BE16(0xB000),
8775                 .protocol = RTE_BE16(UINT16_MAX),
8776         };
8777         const struct rte_flow_item gre_item = {
8778                 .spec = &gre_spec,
8779                 .mask = &gre_mask,
8780                 .last = NULL,
8781         };
8782         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8783         if (!nvgre_v)
8784                 return;
8785         if (!nvgre_m)
8786                 nvgre_m = &rte_flow_item_nvgre_mask;
8787         tni_flow_id_m = (const char *)nvgre_m->tni;
8788         tni_flow_id_v = (const char *)nvgre_v->tni;
8789         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8790         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8791         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8792         memcpy(gre_key_m, tni_flow_id_m, size);
8793         for (i = 0; i < size; ++i)
8794                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8795 }
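
/*
 * Note on the constants above: NVGRE is translated as GRE with fixed header
 * bits. Spec 0x2000 sets only K (key present), mask 0xb000 matches the C, K
 * and S bits. The 24-bit TNI plus the 8-bit flow_id then occupy the same
 * 32-bit slot as a GRE key; viewed as a big-endian word (an illustration,
 * not driver code):
 *
 *     uint32_t key = tni[0] << 24 | tni[1] << 16 | tni[2] << 8 | flow_id;
 */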
8796
8797 /**
8798  * Add VXLAN item to matcher and to the value.
8799  *
8800  * @param[in] dev
8801  *   Pointer to the Ethernet device structure.
8802  * @param[in] attr
8803  *   Flow rule attributes.
8804  * @param[in, out] matcher
8805  *   Flow matcher.
8806  * @param[in, out] key
8807  *   Flow matcher value.
8808  * @param[in] item
8809  *   Flow pattern to translate.
8810  * @param[in] inner
8811  *   Item is inner pattern.
8812  */
8813 static void
8814 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8815                              const struct rte_flow_attr *attr,
8816                              void *matcher, void *key,
8817                              const struct rte_flow_item *item,
8818                              int inner)
8819 {
8820         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8821         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8822         void *headers_m;
8823         void *headers_v;
8824         void *misc5_m;
8825         void *misc5_v;
8826         uint32_t *tunnel_header_v;
8827         uint32_t *tunnel_header_m;
8828         uint16_t dport;
8829         struct mlx5_priv *priv = dev->data->dev_private;
8830         const struct rte_flow_item_vxlan nic_mask = {
8831                 .vni = "\xff\xff\xff",
8832                 .rsvd1 = 0xff,
8833         };
8834
8835         if (inner) {
8836                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8837                                          inner_headers);
8838                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8839         } else {
8840                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8841                                          outer_headers);
8842                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8843         }
8844         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8845                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8846         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8847                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8848                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8849         }
8850         if (!vxlan_v)
8851                 return;
8852         if (!vxlan_m) {
8853                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8854                     (attr->group && !priv->sh->misc5_cap))
8855                         vxlan_m = &rte_flow_item_vxlan_mask;
8856                 else
8857                         vxlan_m = &nic_mask;
8858         }
8859         if ((!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8860             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8861                 void *misc_m;
8862                 void *misc_v;
8863                 char *vni_m;
8864                 char *vni_v;
8865                 int size;
8866                 int i;
8867                 misc_m = MLX5_ADDR_OF(fte_match_param,
8868                                       matcher, misc_parameters);
8869                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8870                 size = sizeof(vxlan_m->vni);
8871                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8872                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8873                 memcpy(vni_m, vxlan_m->vni, size);
8874                 for (i = 0; i < size; ++i)
8875                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8876                 return;
8877         }
8878         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8879         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8880         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8881                                                    misc5_v,
8882                                                    tunnel_header_1);
8883         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8884                                                    misc5_m,
8885                                                    tunnel_header_1);
8886         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8887                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8888                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8889         if (*tunnel_header_v)
8890                 *tunnel_header_m = vxlan_m->vni[0] |
8891                         vxlan_m->vni[1] << 8 |
8892                         vxlan_m->vni[2] << 16;
8893         else
8894                 *tunnel_header_m = 0x0;
8895         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8896         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8897                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8898 }
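
/*
 * Illustrative sketch (not part of the driver): the network-order VNI bytes
 * are packed little-endian into tunnel_header_1, with rsvd1 in the top byte:
 *
 *     uint8_t vni[3] = {0x12, 0x34, 0x56};
 *     uint32_t th = vni[0] | vni[1] << 8 | vni[2] << 16;  // 0x00563412
 *     th |= (uint32_t)rsvd1 << 24;
 */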
8899
8900 /**
8901  * Add VXLAN-GPE item to matcher and to the value.
8902  *
8903  * @param[in, out] matcher
8904  *   Flow matcher.
8905  * @param[in, out] key
8906  *   Flow matcher value.
8907  * @param[in] item
8908  *   Flow pattern to translate.
8909  * @param[in] inner
8910  *   Item is inner pattern.
8911  */
8913 static void
8914 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8915                                  const struct rte_flow_item *item, int inner)
8916 {
8917         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8918         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8919         void *headers_m;
8920         void *headers_v;
8921         void *misc_m =
8922                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8923         void *misc_v =
8924                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8925         char *vni_m;
8926         char *vni_v;
8927         uint16_t dport;
8928         int size;
8929         int i;
8930         uint8_t flags_m = 0xff;
8931         uint8_t flags_v = 0xc;
8932
8933         if (inner) {
8934                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8935                                          inner_headers);
8936                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8937         } else {
8938                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8939                                          outer_headers);
8940                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8941         }
8942         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8943                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8944         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8945                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8946                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8947         }
8948         if (!vxlan_v)
8949                 return;
8950         if (!vxlan_m)
8951                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8952         size = sizeof(vxlan_m->vni);
8953         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8954         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8955         memcpy(vni_m, vxlan_m->vni, size);
8956         for (i = 0; i < size; ++i)
8957                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8958         if (vxlan_m->flags) {
8959                 flags_m = vxlan_m->flags;
8960                 flags_v = vxlan_v->flags;
8961         }
8962         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8963         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8964         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8965                  vxlan_m->protocol);
8966         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8967                  vxlan_v->protocol);
8968 }
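
/*
 * Note on the defaults above (assuming the VXLAN-GPE flags octet layout
 * R|R|Ver|Ver|I|P|R|O): when the user gave no flags mask, the value 0x0c
 * pre-matches the I (VNI valid) and P (next protocol present) bits:
 *
 *     uint8_t flags = 0x08 | 0x04;  // I | P == 0x0c
 */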
8969
8970 /**
8971  * Add Geneve item to matcher and to the value.
8972  *
8973  * @param[in, out] matcher
8974  *   Flow matcher.
8975  * @param[in, out] key
8976  *   Flow matcher value.
8977  * @param[in] item
8978  *   Flow pattern to translate.
8979  * @param[in] inner
8980  *   Item is inner pattern.
8981  */
8983 static void
8984 flow_dv_translate_item_geneve(void *matcher, void *key,
8985                               const struct rte_flow_item *item, int inner)
8986 {
8987         const struct rte_flow_item_geneve *geneve_m = item->mask;
8988         const struct rte_flow_item_geneve *geneve_v = item->spec;
8989         void *headers_m;
8990         void *headers_v;
8991         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8992         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8993         uint16_t dport;
8994         uint16_t gbhdr_m;
8995         uint16_t gbhdr_v;
8996         char *vni_m;
8997         char *vni_v;
8998         size_t size, i;
8999
9000         if (inner) {
9001                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9002                                          inner_headers);
9003                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9004         } else {
9005                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9006                                          outer_headers);
9007                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9008         }
9009         dport = MLX5_UDP_PORT_GENEVE;
9010         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9011                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9012                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9013         }
9014         if (!geneve_v)
9015                 return;
9016         if (!geneve_m)
9017                 geneve_m = &rte_flow_item_geneve_mask;
9018         size = sizeof(geneve_m->vni);
9019         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9020         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9021         memcpy(vni_m, geneve_m->vni, size);
9022         for (i = 0; i < size; ++i)
9023                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9024         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
9025                  rte_be_to_cpu_16(geneve_m->protocol));
9026         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9027                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
9028         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9029         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9030         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9031                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9032         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9033                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9034         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9035                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9036         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9037                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9038                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9039 }
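
/*
 * Illustrative sketch (not part of the driver): the Geneve base header word
 * is Ver(2) | OptLen(6) | O(1) | C(1) | Rsvd(6); assuming the PRM helper
 * macros extract accordingly:
 *
 *     uint16_t w      = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
 *     unsigned optlen = (w >> 8) & 0x3f;  // MLX5_GENEVE_OPTLEN_VAL (assumed)
 *     unsigned oam    = (w >> 7) & 0x1;   // MLX5_GENEVE_OAMF_VAL   (assumed)
 */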
9040
/**
 * Create Geneve TLV option resource.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
9057 int
9058 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9059                                              const struct rte_flow_item *item,
9060                                              struct rte_flow_error *error)
9061 {
9062         struct mlx5_priv *priv = dev->data->dev_private;
9063         struct mlx5_dev_ctx_shared *sh = priv->sh;
9064         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9065                         sh->geneve_tlv_option_resource;
9066         struct mlx5_devx_obj *obj;
9067         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9068         int ret = 0;
9069
9070         if (!geneve_opt_v)
9071                 return -1;
9072         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9073         if (geneve_opt_resource != NULL) {
9074                 if (geneve_opt_resource->option_class ==
9075                         geneve_opt_v->option_class &&
9076                         geneve_opt_resource->option_type ==
9077                         geneve_opt_v->option_type &&
9078                         geneve_opt_resource->length ==
9079                         geneve_opt_v->option_len) {
                        /* We already have GENEVE TLV option obj allocated. */
9081                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9082                                            __ATOMIC_RELAXED);
9083                 } else {
9084                         ret = rte_flow_error_set(error, ENOMEM,
9085                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9086                                 "Only one GENEVE TLV option supported");
9087                         goto exit;
9088                 }
9089         } else {
9090                 /* Create a GENEVE TLV object and resource. */
9091                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
9092                                 geneve_opt_v->option_class,
9093                                 geneve_opt_v->option_type,
9094                                 geneve_opt_v->option_len);
9095                 if (!obj) {
9096                         ret = rte_flow_error_set(error, ENODATA,
9097                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9098                                 "Failed to create GENEVE TLV Devx object");
9099                         goto exit;
9100                 }
9101                 sh->geneve_tlv_option_resource =
9102                                 mlx5_malloc(MLX5_MEM_ZERO,
9103                                                 sizeof(*geneve_opt_resource),
9104                                                 0, SOCKET_ID_ANY);
9105                 if (!sh->geneve_tlv_option_resource) {
9106                         claim_zero(mlx5_devx_cmd_destroy(obj));
9107                         ret = rte_flow_error_set(error, ENOMEM,
9108                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9109                                 "GENEVE TLV object memory allocation failed");
9110                         goto exit;
9111                 }
9112                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9113                 geneve_opt_resource->obj = obj;
9114                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9115                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9116                 geneve_opt_resource->length = geneve_opt_v->option_len;
9117                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9118                                 __ATOMIC_RELAXED);
9119         }
9120 exit:
9121         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9122         return ret;
9123 }
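
/*
 * Sketch of the matching release path (hypothetical, shown only to
 * illustrate the singleton-with-refcount pattern used above):
 *
 *     rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
 *     if (__atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED) == 0) {
 *         claim_zero(mlx5_devx_cmd_destroy(res->obj));
 *         mlx5_free(res);
 *         sh->geneve_tlv_option_resource = NULL;
 *     }
 *     rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
 */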
9124
9125 /**
9126  * Add Geneve TLV option item to matcher.
9127  *
9128  * @param[in, out] dev
9129  *   Pointer to rte_eth_dev structure.
9130  * @param[in, out] matcher
9131  *   Flow matcher.
9132  * @param[in, out] key
9133  *   Flow matcher value.
9134  * @param[in] item
9135  *   Flow pattern to translate.
9136  * @param[out] error
9137  *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
9139 static int
9140 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9141                                   void *key, const struct rte_flow_item *item,
9142                                   struct rte_flow_error *error)
9143 {
9144         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9145         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9146         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9147         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9148         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9149                         misc_parameters_3);
9150         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9151         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9152         int ret = 0;
9153
9154         if (!geneve_opt_v)
9155                 return -1;
9156         if (!geneve_opt_m)
9157                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9158         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9159                                                            error);
9160         if (ret) {
9161                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9162                 return ret;
9163         }
        /*
         * The GENEVE TLV option length is expressed by the option length
         * field in the GENEVE base header. If a match on it was not
         * requested but a GENEVE TLV option item is present, set the
         * option length field implicitly.
         */
9171         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9172                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9173                          MLX5_GENEVE_OPTLEN_MASK);
9174                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9175                          geneve_opt_v->option_len + 1);
9176         }
9177         /* Set the data. */
9178         if (geneve_opt_v->data) {
9179                 memcpy(&opt_data_key, geneve_opt_v->data,
9180                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9181                                 sizeof(opt_data_key)));
9182                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9183                                 sizeof(opt_data_key));
9184                 memcpy(&opt_data_mask, geneve_opt_m->data,
9185                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9186                                 sizeof(opt_data_mask)));
9187                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9188                                 sizeof(opt_data_mask));
9189                 MLX5_SET(fte_match_set_misc3, misc3_m,
9190                                 geneve_tlv_option_0_data,
9191                                 rte_be_to_cpu_32(opt_data_mask));
9192                 MLX5_SET(fte_match_set_misc3, misc3_v,
9193                                 geneve_tlv_option_0_data,
9194                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9195         }
9196         return ret;
9197 }
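
/*
 * Note on units (an assumption based on the Geneve TLV format): option_len
 * counts 4-byte words of option data, while the opt_len field in the base
 * header also covers the 4-byte TLV header itself, hence the "+ 1" above:
 *
 *     unsigned opt_len_words = geneve_opt_v->option_len + 1;  // header word
 *     unsigned data_bytes    = geneve_opt_v->option_len * 4;  // copied data
 */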
9198
9199 /**
9200  * Add MPLS item to matcher and to the value.
9201  *
9202  * @param[in, out] matcher
9203  *   Flow matcher.
9204  * @param[in, out] key
9205  *   Flow matcher value.
9206  * @param[in] item
9207  *   Flow pattern to translate.
9208  * @param[in] prev_layer
9209  *   The protocol layer indicated in previous item.
9210  * @param[in] inner
9211  *   Item is inner pattern.
9212  */
9213 static void
9214 flow_dv_translate_item_mpls(void *matcher, void *key,
9215                             const struct rte_flow_item *item,
9216                             uint64_t prev_layer,
9217                             int inner)
9218 {
9219         const uint32_t *in_mpls_m = item->mask;
9220         const uint32_t *in_mpls_v = item->spec;
        uint32_t *out_mpls_m = NULL;
        uint32_t *out_mpls_v = NULL;
9223         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9224         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9225         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9226                                      misc_parameters_2);
9227         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9228         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9229         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9230
9231         switch (prev_layer) {
9232         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9233                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9234                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9235                          MLX5_UDP_PORT_MPLS);
9236                 break;
9237         case MLX5_FLOW_LAYER_GRE:
9238                 /* Fall-through. */
9239         case MLX5_FLOW_LAYER_GRE_KEY:
9240                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9241                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9242                          RTE_ETHER_TYPE_MPLS);
9243                 break;
9244         default:
9245                 break;
9246         }
9247         if (!in_mpls_v)
9248                 return;
9249         if (!in_mpls_m)
9250                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9251         switch (prev_layer) {
9252         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9253                 out_mpls_m =
9254                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9255                                                  outer_first_mpls_over_udp);
9256                 out_mpls_v =
9257                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9258                                                  outer_first_mpls_over_udp);
9259                 break;
9260         case MLX5_FLOW_LAYER_GRE:
9261                 out_mpls_m =
9262                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9263                                                  outer_first_mpls_over_gre);
9264                 out_mpls_v =
9265                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9266                                                  outer_first_mpls_over_gre);
9267                 break;
9268         default:
9269                 /* Inner MPLS not over GRE is not supported. */
9270                 if (!inner) {
9271                         out_mpls_m =
9272                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9273                                                          misc2_m,
9274                                                          outer_first_mpls);
9275                         out_mpls_v =
9276                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9277                                                          misc2_v,
9278                                                          outer_first_mpls);
9279                 }
9280                 break;
9281         }
9282         if (out_mpls_m && out_mpls_v) {
9283                 *out_mpls_m = *in_mpls_m;
9284                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9285         }
9286 }
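
/*
 * Illustrative summary (not part of the driver): the matched word is a
 * whole MPLS label stack entry; only its placement depends on the previous
 * layer:
 *
 *     // Label(20) | TC(3) | S(1) | TTL(8)
 *     // over UDP -> outer_first_mpls_over_udp
 *     // over GRE -> outer_first_mpls_over_gre
 *     // else     -> outer_first_mpls (outer pattern only)
 */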
9287
9288 /**
9289  * Add metadata register item to matcher
9290  *
9291  * @param[in, out] matcher
9292  *   Flow matcher.
9293  * @param[in, out] key
9294  *   Flow matcher value.
 * @param[in] reg_type
 *   Type of device metadata register.
 * @param[in] data
 *   Register value to match.
 * @param[in] mask
 *   Register mask.
9301  */
9302 static void
9303 flow_dv_match_meta_reg(void *matcher, void *key,
9304                        enum modify_reg reg_type,
9305                        uint32_t data, uint32_t mask)
9306 {
9307         void *misc2_m =
9308                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9309         void *misc2_v =
9310                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9311         uint32_t temp;
9312
9313         data &= mask;
9314         switch (reg_type) {
9315         case REG_A:
9316                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9317                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9318                 break;
9319         case REG_B:
9320                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9321                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9322                 break;
9323         case REG_C_0:
9324                 /*
9325                  * The metadata register C0 field might be divided into
9326                  * source vport index and META item value, we should set
9327                  * this field according to specified mask, not as whole one.
9328                  */
9329                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9330                 temp |= mask;
9331                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9332                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9333                 temp &= ~mask;
9334                 temp |= data;
9335                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9336                 break;
9337         case REG_C_1:
9338                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9339                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9340                 break;
9341         case REG_C_2:
9342                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9343                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9344                 break;
9345         case REG_C_3:
9346                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9347                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9348                 break;
9349         case REG_C_4:
9350                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9351                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9352                 break;
9353         case REG_C_5:
9354                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9355                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9356                 break;
9357         case REG_C_6:
9358                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9359                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9360                 break;
9361         case REG_C_7:
9362                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9363                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9364                 break;
9365         default:
9366                 MLX5_ASSERT(false);
9367                 break;
9368         }
9369 }
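
/*
 * Illustrative sketch (not part of the driver): the REG_C_0 read-modify-
 * write above lets two users share the register, e.g. a vport part and a
 * META part:
 *
 *     // existing: mask 0x0000ffff, value 0x00001234 (vport)
 *     // request : mask 0xffff0000, value 0xabcd0000 (META)
 *     uint32_t m = 0x0000ffff | 0xffff0000;                  // 0xffffffff
 *     uint32_t v = (0x00001234 & ~0xffff0000) | 0xabcd0000;  // 0xabcd1234
 */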
9370
9371 /**
9372  * Add MARK item to matcher
9373  *
9374  * @param[in] dev
9375  *   The device to configure through.
9376  * @param[in, out] matcher
9377  *   Flow matcher.
9378  * @param[in, out] key
9379  *   Flow matcher value.
9380  * @param[in] item
9381  *   Flow pattern to translate.
9382  */
9383 static void
9384 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9385                             void *matcher, void *key,
9386                             const struct rte_flow_item *item)
9387 {
9388         struct mlx5_priv *priv = dev->data->dev_private;
9389         const struct rte_flow_item_mark *mark;
9390         uint32_t value;
9391         uint32_t mask;
9392
9393         mark = item->mask ? (const void *)item->mask :
9394                             &rte_flow_item_mark_mask;
9395         mask = mark->id & priv->sh->dv_mark_mask;
9396         mark = (const void *)item->spec;
9397         MLX5_ASSERT(mark);
9398         value = mark->id & priv->sh->dv_mark_mask & mask;
9399         if (mask) {
9400                 enum modify_reg reg;
9401
9402                 /* Get the metadata register index for the mark. */
9403                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9404                 MLX5_ASSERT(reg > 0);
9405                 if (reg == REG_C_0) {
9406                         struct mlx5_priv *priv = dev->data->dev_private;
9407                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9408                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9409
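                        /*
                         * rte_bsf32() returns the index of the least
                         * significant set bit of the C0 mask, i.e. the
                         * offset of the metadata portion of C0 when the
                         * register is shared with vport metadata; shift
                         * the value and mask into that position.
                         */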
9410                         mask &= msk_c0;
9411                         mask <<= shl_c0;
9412                         value <<= shl_c0;
9413                 }
9414                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9415         }
9416 }
9417
9418 /**
9419  * Add META item to matcher
9420  *
9421  * @param[in] dev
9422  *   The device to configure through.
9423  * @param[in, out] matcher
9424  *   Flow matcher.
9425  * @param[in, out] key
9426  *   Flow matcher value.
9427  * @param[in] attr
9428  *   Attributes of flow that includes this item.
9429  * @param[in] item
9430  *   Flow pattern to translate.
9431  */
9432 static void
9433 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9434                             void *matcher, void *key,
9435                             const struct rte_flow_attr *attr,
9436                             const struct rte_flow_item *item)
9437 {
9438         const struct rte_flow_item_meta *meta_m;
9439         const struct rte_flow_item_meta *meta_v;
9440
9441         meta_m = (const void *)item->mask;
9442         if (!meta_m)
9443                 meta_m = &rte_flow_item_meta_mask;
9444         meta_v = (const void *)item->spec;
9445         if (meta_v) {
9446                 int reg;
9447                 uint32_t value = meta_v->data;
9448                 uint32_t mask = meta_m->data;
9449
9450                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9451                 if (reg < 0)
9452                         return;
9453                 MLX5_ASSERT(reg != REG_NON);
9454                 if (reg == REG_C_0) {
9455                         struct mlx5_priv *priv = dev->data->dev_private;
9456                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9457                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9458
9459                         mask &= msk_c0;
9460                         mask <<= shl_c0;
9461                         value <<= shl_c0;
9462                 }
9463                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9464         }
9465 }
9466
9467 /**
9468  * Add vport metadata Reg C0 item to matcher
9469  *
9470  * @param[in, out] matcher
9471  *   Flow matcher.
9472  * @param[in, out] key
9473  *   Flow matcher value.
9474  * @param[in] value
9475  *   Vport metadata register value to match.
 * @param[in] mask
 *   Vport metadata register mask.
9476  */
9477 static void
9478 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9479                                   uint32_t value, uint32_t mask)
9480 {
9481         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9482 }
9483
9484 /**
9485  * Add tag item to matcher
9486  *
9487  * @param[in] dev
9488  *   The device to configure through.
9489  * @param[in, out] matcher
9490  *   Flow matcher.
9491  * @param[in, out] key
9492  *   Flow matcher value.
9493  * @param[in] item
9494  *   Flow pattern to translate.
9495  */
9496 static void
9497 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9498                                 void *matcher, void *key,
9499                                 const struct rte_flow_item *item)
9500 {
9501         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9502         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9503         uint32_t mask, value;
9504
9505         MLX5_ASSERT(tag_v);
9506         value = tag_v->data;
9507         mask = tag_m ? tag_m->data : UINT32_MAX;
9508         if (tag_v->id == REG_C_0) {
9509                 struct mlx5_priv *priv = dev->data->dev_private;
9510                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9511                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9512
9513                 mask &= msk_c0;
9514                 mask <<= shl_c0;
9515                 value <<= shl_c0;
9516         }
9517         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9518 }
9519
9520 /**
9521  * Add TAG item to matcher
9522  *
9523  * @param[in] dev
9524  *   The device to configure through.
9525  * @param[in, out] matcher
9526  *   Flow matcher.
9527  * @param[in, out] key
9528  *   Flow matcher value.
9529  * @param[in] item
9530  *   Flow pattern to translate.
9531  */
9532 static void
9533 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9534                            void *matcher, void *key,
9535                            const struct rte_flow_item *item)
9536 {
9537         const struct rte_flow_item_tag *tag_v = item->spec;
9538         const struct rte_flow_item_tag *tag_m = item->mask;
9539         enum modify_reg reg;
9540
9541         MLX5_ASSERT(tag_v);
9542         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9543         /* Get the metadata register index for the tag. */
9544         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9545         MLX5_ASSERT(reg > 0);
9546         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9547 }
9548
9549 /**
9550  * Add source vport match to the specified matcher.
9551  *
9552  * @param[in, out] matcher
9553  *   Flow matcher.
9554  * @param[in, out] key
9555  *   Flow matcher value.
9556  * @param[in] port
9557  *   Source vport value to match.
9558  * @param[in] mask
9559  *   Mask to apply on the source vport value.
9560  */
9561 static void
9562 flow_dv_translate_item_source_vport(void *matcher, void *key,
9563                                     int16_t port, uint16_t mask)
9564 {
9565         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9566         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9567
9568         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9569         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9570 }
9571
9572 /**
9573  * Translate port-id item to eswitch match on port-id.
9574  *
9575  * @param[in] dev
9576  *   The device to configure through.
9577  * @param[in, out] matcher
9578  *   Flow matcher.
9579  * @param[in, out] key
9580  *   Flow matcher value.
9581  * @param[in] item
9582  *   Flow pattern to translate.
9583  * @param[in] attr
9584  *   Flow attributes.
9585  *
9586  * @return
9587  *   0 on success, a negative errno value otherwise.
9588  */
9589 static int
9590 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9591                                void *key, const struct rte_flow_item *item,
9592                                const struct rte_flow_attr *attr)
9593 {
9594         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9595         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9596         struct mlx5_priv *priv;
9597         uint16_t mask, id;
9598
9599         mask = pid_m ? pid_m->id : 0xffff;
9600         id = pid_v ? pid_v->id : dev->data->port_id;
9601         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9602         if (!priv)
9603                 return -rte_errno;
9604         /*
9605          * Translate to vport field or to metadata, depending on mode.
9606          * Kernel can use either misc.source_port or half of C0 metadata
9607          * register.
9608          */
9609         if (priv->vport_meta_mask) {
9610                 /*
9611                  * Provide the hint for SW steering library
9612                  * to insert the flow into ingress domain and
9613                  * save the extra vport match.
9614                  */
9615                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9616                     priv->pf_bond < 0 && attr->transfer)
9617                         flow_dv_translate_item_source_vport
9618                                 (matcher, key, priv->vport_id, mask);
9619                 /*
9620                  * We should always set the vport metadata register,
9621                  * otherwise the SW steering library can drop
9622                  * the rule if wire vport metadata value is not zero,
9623                  * it depends on kernel configuration.
9624                  */
9625                 flow_dv_translate_item_meta_vport(matcher, key,
9626                                                   priv->vport_meta_tag,
9627                                                   priv->vport_meta_mask);
9628         } else {
9629                 flow_dv_translate_item_source_vport(matcher, key,
9630                                                     priv->vport_id, mask);
9631         }
9632         return 0;
9633 }
9634
9635 /**
9636  * Add ICMP6 item to matcher and to the value.
9637  *
9638  * @param[in, out] matcher
9639  *   Flow matcher.
9640  * @param[in, out] key
9641  *   Flow matcher value.
9642  * @param[in] item
9643  *   Flow pattern to translate.
9644  * @param[in] inner
9645  *   Item is inner pattern.
9646  */
9647 static void
9648 flow_dv_translate_item_icmp6(void *matcher, void *key,
9649                               const struct rte_flow_item *item,
9650                               int inner)
9651 {
9652         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9653         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9654         void *headers_m;
9655         void *headers_v;
9656         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9657                                      misc_parameters_3);
9658         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9659         if (inner) {
9660                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9661                                          inner_headers);
9662                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9663         } else {
9664                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9665                                          outer_headers);
9666                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9667         }
9668         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9669         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9670         if (!icmp6_v)
9671                 return;
9672         if (!icmp6_m)
9673                 icmp6_m = &rte_flow_item_icmp6_mask;
9674         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9675         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9676                  icmp6_v->type & icmp6_m->type);
9677         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9678         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9679                  icmp6_v->code & icmp6_m->code);
9680 }
9681
9682 /**
9683  * Add ICMP item to matcher and to the value.
9684  *
9685  * @param[in, out] matcher
9686  *   Flow matcher.
9687  * @param[in, out] key
9688  *   Flow matcher value.
9689  * @param[in] item
9690  *   Flow pattern to translate.
9691  * @param[in] inner
9692  *   Item is inner pattern.
9693  */
9694 static void
9695 flow_dv_translate_item_icmp(void *matcher, void *key,
9696                             const struct rte_flow_item *item,
9697                             int inner)
9698 {
9699         const struct rte_flow_item_icmp *icmp_m = item->mask;
9700         const struct rte_flow_item_icmp *icmp_v = item->spec;
9701         uint32_t icmp_header_data_m = 0;
9702         uint32_t icmp_header_data_v = 0;
9703         void *headers_m;
9704         void *headers_v;
9705         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9706                                      misc_parameters_3);
9707         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9708         if (inner) {
9709                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9710                                          inner_headers);
9711                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9712         } else {
9713                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9714                                          outer_headers);
9715                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9716         }
9717         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9718         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9719         if (!icmp_v)
9720                 return;
9721         if (!icmp_m)
9722                 icmp_m = &rte_flow_item_icmp_mask;
9723         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9724                  icmp_m->hdr.icmp_type);
9725         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9726                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9727         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9728                  icmp_m->hdr.icmp_code);
9729         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9730                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9731         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9732         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9733         if (icmp_header_data_m) {
9734                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9735                 icmp_header_data_v |=
9736                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9737                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9738                          icmp_header_data_m);
9739                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9740                          icmp_header_data_v & icmp_header_data_m);
9741         }
9742 }
9743
9744 /**
9745  * Add GTP item to matcher and to the value.
9746  *
9747  * @param[in, out] matcher
9748  *   Flow matcher.
9749  * @param[in, out] key
9750  *   Flow matcher value.
9751  * @param[in] item
9752  *   Flow pattern to translate.
9753  * @param[in] inner
9754  *   Item is inner pattern.
9755  */
9756 static void
9757 flow_dv_translate_item_gtp(void *matcher, void *key,
9758                            const struct rte_flow_item *item, int inner)
9759 {
9760         const struct rte_flow_item_gtp *gtp_m = item->mask;
9761         const struct rte_flow_item_gtp *gtp_v = item->spec;
9762         void *headers_m;
9763         void *headers_v;
9764         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9765                                      misc_parameters_3);
9766         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9767         uint16_t dport = RTE_GTPU_UDP_PORT;
9768
9769         if (inner) {
9770                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9771                                          inner_headers);
9772                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9773         } else {
9774                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9775                                          outer_headers);
9776                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9777         }
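        /*
         * Match the default GTP-U UDP destination port (2152) only when
         * the pattern has not already constrained the outer UDP dport.
         */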
9778         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9779                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9780                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9781         }
9782         if (!gtp_v)
9783                 return;
9784         if (!gtp_m)
9785                 gtp_m = &rte_flow_item_gtp_mask;
9786         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9787                  gtp_m->v_pt_rsv_flags);
9788         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9789                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9790         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9791         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9792                  gtp_v->msg_type & gtp_m->msg_type);
9793         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9794                  rte_be_to_cpu_32(gtp_m->teid));
9795         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9796                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9797 }
9798
9799 /**
9800  * Add GTP PSC item to matcher.
9801  *
9802  * @param[in, out] matcher
9803  *   Flow matcher.
9804  * @param[in, out] key
9805  *   Flow matcher value.
9806  * @param[in] item
9807  *   Flow pattern to translate.
 *
 * @return
 *   0 on success.
9808  */
9809 static int
9810 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9811                                const struct rte_flow_item *item)
9812 {
9813         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9814         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9815         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9816                         misc_parameters_3);
9817         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9818         union {
9819                 uint32_t w32;
9820                 struct {
9821                         uint16_t seq_num;
9822                         uint8_t npdu_num;
9823                         uint8_t next_ext_header_type;
9824                 };
9825         } dw_2;
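        /*
         * dw_2 mirrors the GTP-U optional-fields dword: sequence number
         * (16 bits), N-PDU number (8 bits) and next extension header type
         * (8 bits); it is converted with rte_cpu_to_be_32() before being
         * written into the matcher.
         */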
9826         uint8_t gtp_flags;
9827
9828         /* Always set the E-flag match to one, regardless of GTP item settings. */
9829         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9830         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9831         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9832         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9833         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9834         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9835         /* Set next extension header type: 0x85 is the PDU Session Container type. */
9836         dw_2.seq_num = 0;
9837         dw_2.npdu_num = 0;
9838         dw_2.next_ext_header_type = 0xff;
9839         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9840                  rte_cpu_to_be_32(dw_2.w32));
9841         dw_2.seq_num = 0;
9842         dw_2.npdu_num = 0;
9843         dw_2.next_ext_header_type = 0x85;
9844         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9845                  rte_cpu_to_be_32(dw_2.w32));
9846         if (gtp_psc_v) {
9847                 union {
9848                         uint32_t w32;
9849                         struct {
9850                                 uint8_t len;
9851                                 uint8_t type_flags;
9852                                 uint8_t qfi;
9853                                 uint8_t reserved;
9854                         };
9855                 } dw_0;
9856
9857                 /* Set extension header PDU type and QoS. */
9858                 if (!gtp_psc_m)
9859                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9860                 dw_0.w32 = 0;
9861                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9862                 dw_0.qfi = gtp_psc_m->qfi;
9863                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9864                          rte_cpu_to_be_32(dw_0.w32));
9865                 dw_0.w32 = 0;
9866                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9867                                                         gtp_psc_m->pdu_type);
9868                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9869                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9870                          rte_cpu_to_be_32(dw_0.w32));
9871         }
9872         return 0;
9873 }
9874
9875 /**
9876  * Add eCPRI item to matcher and to the value.
9877  *
9878  * @param[in] dev
9879  *   The device to configure through.
9880  * @param[in, out] matcher
9881  *   Flow matcher.
9882  * @param[in, out] key
9883  *   Flow matcher value.
9884  * @param[in] item
9885  *   Flow pattern to translate.
9888  */
9889 static void
9890 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9891                              void *key, const struct rte_flow_item *item)
9892 {
9893         struct mlx5_priv *priv = dev->data->dev_private;
9894         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9895         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9896         struct rte_ecpri_common_hdr common;
9897         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9898                                      misc_parameters_4);
9899         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9900         uint32_t *samples;
9901         void *dw_m;
9902         void *dw_v;
9903
9904         if (!ecpri_v)
9905                 return;
9906         if (!ecpri_m)
9907                 ecpri_m = &rte_flow_item_ecpri_mask;
9908         /*
9909          * Maximal four DW samples are supported in a single matching now.
9910          * Two are used now for eCPRI matching:
9911          * 1. Type: one byte, mask should be 0x00ff0000 in network order
9912          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
9913          *    if any.
9914          */
9915         if (!ecpri_m->hdr.common.u32)
9916                 return;
9917         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9918         /* Need to take the whole DW as the mask to fill the entry. */
9919         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9920                             prog_sample_field_value_0);
9921         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9922                             prog_sample_field_value_0);
9923         /* Already big endian (network order) in the header. */
9924         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9925         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9926         /* Sample#0, used for matching type, offset 0. */
9927         MLX5_SET(fte_match_set_misc4, misc4_m,
9928                  prog_sample_field_id_0, samples[0]);
9929         /* It makes no sense to set the sample ID in the mask field. */
9930         MLX5_SET(fte_match_set_misc4, misc4_v,
9931                  prog_sample_field_id_0, samples[0]);
9932         /*
9933          * Checking if message body part needs to be matched.
9934          * Some wildcard rules only matching type field should be supported.
9935          */
9936         if (ecpri_m->hdr.dummy[0]) {
9937                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9938                 switch (common.type) {
9939                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9940                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9941                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9942                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9943                                             prog_sample_field_value_1);
9944                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9945                                             prog_sample_field_value_1);
9946                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9947                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9948                                             ecpri_m->hdr.dummy[0];
9949                         /* Sample#1, to match message body, offset 4. */
9950                         MLX5_SET(fte_match_set_misc4, misc4_m,
9951                                  prog_sample_field_id_1, samples[1]);
9952                         MLX5_SET(fte_match_set_misc4, misc4_v,
9953                                  prog_sample_field_id_1, samples[1]);
9954                         break;
9955                 default:
9956                         /* Others, do not match any sample ID. */
9957                         break;
9958                 }
9959         }
9960 }
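/*
 * Illustrative usage (values assumed): to match only the one-byte eCPRI
 * message type, an application sets hdr.common.u32 in the item mask to
 * RTE_BE32(0x00ff0000); matching the message body as well (e.g. the PC_ID
 * of an IQ data message) additionally fills hdr.dummy[0] in spec and mask.
 */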
9961
9962 /**
9963  * Add connection tracking status item to matcher
9964  *
9965  * @param[in] dev
9966  *   The device to configure through.
9967  * @param[in, out] matcher
9968  *   Flow matcher.
9969  * @param[in, out] key
9970  *   Flow matcher value.
9971  * @param[in] item
9972  *   Flow pattern to translate.
9973  */
9974 static void
9975 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
9976                               void *matcher, void *key,
9977                               const struct rte_flow_item *item)
9978 {
9979         uint32_t reg_value = 0;
9980         int reg_id;
9981         /* Of the 8 LSBs (0b11000011), the middle 4 bits are reserved. */
9982         uint32_t reg_mask = 0;
9983         const struct rte_flow_item_conntrack *spec = item->spec;
9984         const struct rte_flow_item_conntrack *mask = item->mask;
9985         uint32_t flags;
9986         struct rte_flow_error error;
9987
9988         if (!mask)
9989                 mask = &rte_flow_item_conntrack_mask;
9990         if (!spec || !mask->flags)
9991                 return;
9992         flags = spec->flags & mask->flags;
9993         /* The conflict should be checked in the validation. */
9994         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
9995                 reg_value |= MLX5_CT_SYNDROME_VALID;
9996         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9997                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
9998         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
9999                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10000         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10001                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10002         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10003                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10004         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10005                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10006                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10007                 reg_mask |= 0xc0;
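        /*
         * VALID, INVALID and DISABLED appear to be encoded as mutually
         * exclusive states in the two MSBs of the syndrome byte, so the
         * single 0xc0 mask covers all three.
         */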
10008         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10009                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10010         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10011                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10012         /* The REG_C_x value could be saved during startup. */
10013         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10014         if (reg_id == REG_NON)
10015                 return;
10016         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10017                                reg_value, reg_mask);
10018 }
10019
10020 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10021
10022 #define HEADER_IS_ZERO(match_criteria, headers)                              \
10023         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
10024                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
10025
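/*
 * HEADER_IS_ZERO() evaluates to 1 when the given sub-header of the match
 * criteria is all zeros; flow_dv_matcher_enable() negates it to build the
 * bitmap of criteria sets a matcher actually uses.
 */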
10026 /**
10027  * Calculate flow matcher enable bitmap.
10028  *
10029  * @param match_criteria
10030  *   Pointer to flow matcher criteria.
10031  *
10032  * @return
10033  *   Bitmap of enabled fields.
10034  */
10035 static uint8_t
10036 flow_dv_matcher_enable(uint32_t *match_criteria)
10037 {
10038         uint8_t match_criteria_enable;
10039
10040         match_criteria_enable =
10041                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10042                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10043         match_criteria_enable |=
10044                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10045                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10046         match_criteria_enable |=
10047                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10048                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10049         match_criteria_enable |=
10050                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10051                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10052         match_criteria_enable |=
10053                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10054                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10055         match_criteria_enable |=
10056                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10057                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10058         match_criteria_enable |=
10059                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10060                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10061         return match_criteria_enable;
10062 }
10063
10064 static void
10065 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10066 {
10067         /*
10068          * Check flow matching criteria first, subtract misc5/4 length if flow
10069          * doesn't own misc5/4 parameters. In some old rdma-core releases,
10070          * misc5/4 are not supported, and matcher creation failure is expected
10071          * w/o subtraction. If misc5 is provided, misc4 must be counted in since
10072          * misc5 is right after misc4.
10073          */
10074         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10075                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10076                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10077                 if (!(match_criteria & (1 <<
10078                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10079                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10080                 }
10081         }
10082 }
10083
10084 static struct mlx5_list_entry *
10085 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10086                          struct mlx5_list_entry *entry, void *cb_ctx)
10087 {
10088         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10089         struct mlx5_flow_dv_matcher *ref = ctx->data;
10090         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10091                                                             typeof(*tbl), tbl);
10092         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10093                                                             sizeof(*resource),
10094                                                             0, SOCKET_ID_ANY);
10095
10096         if (!resource) {
10097                 rte_flow_error_set(ctx->error, ENOMEM,
10098                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10099                                    "cannot create matcher");
10100                 return NULL;
10101         }
10102         memcpy(resource, entry, sizeof(*resource));
10103         resource->tbl = &tbl->tbl;
10104         return &resource->entry;
10105 }
10106
10107 static void
10108 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10109                              struct mlx5_list_entry *entry)
10110 {
10111         mlx5_free(entry);
10112 }
10113
10114 struct mlx5_list_entry *
10115 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10116 {
10117         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10118         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10119         struct rte_eth_dev *dev = ctx->dev;
10120         struct mlx5_flow_tbl_data_entry *tbl_data;
10121         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10122         struct rte_flow_error *error = ctx->error;
10123         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10124         struct mlx5_flow_tbl_resource *tbl;
10125         void *domain;
10126         uint32_t idx = 0;
10127         int ret;
10128
10129         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10130         if (!tbl_data) {
10131                 rte_flow_error_set(error, ENOMEM,
10132                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10133                                    NULL,
10134                                    "cannot allocate flow table data entry");
10135                 return NULL;
10136         }
10137         tbl_data->idx = idx;
10138         tbl_data->tunnel = tt_prm->tunnel;
10139         tbl_data->group_id = tt_prm->group_id;
10140         tbl_data->external = !!tt_prm->external;
10141         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10142         tbl_data->is_egress = !!key.is_egress;
10143         tbl_data->is_transfer = !!key.is_fdb;
10144         tbl_data->dummy = !!key.dummy;
10145         tbl_data->level = key.level;
10146         tbl_data->id = key.id;
10147         tbl = &tbl_data->tbl;
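        /* A dummy table carries bookkeeping only; no HW table object is created. */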
10148         if (key.dummy)
10149                 return &tbl_data->entry;
10150         if (key.is_fdb)
10151                 domain = sh->fdb_domain;
10152         else if (key.is_egress)
10153                 domain = sh->tx_domain;
10154         else
10155                 domain = sh->rx_domain;
10156         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10157         if (ret) {
10158                 rte_flow_error_set(error, ENOMEM,
10159                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10160                                    NULL, "cannot create flow table object");
10161                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10162                 return NULL;
10163         }
10164         if (key.level != 0) {
10165                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10166                                         (tbl->obj, &tbl_data->jump.action);
10167                 if (ret) {
10168                         rte_flow_error_set(error, ENOMEM,
10169                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10170                                            NULL,
10171                                            "cannot create flow jump action");
10172                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10173                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10174                         return NULL;
10175                 }
10176         }
10177         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10178               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10179               key.level, key.id);
10180         tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10181                                               flow_dv_matcher_create_cb,
10182                                               flow_dv_matcher_match_cb,
10183                                               flow_dv_matcher_remove_cb,
10184                                               flow_dv_matcher_clone_cb,
10185                                               flow_dv_matcher_clone_free_cb);
10186         if (!tbl_data->matchers) {
10187                 rte_flow_error_set(error, ENOMEM,
10188                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10189                                    NULL,
10190                                    "cannot create tbl matcher list");
10191                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10192                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10193                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10194                 return NULL;
10195         }
10196         return &tbl_data->entry;
10197 }
10198
10199 int
10200 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10201                      void *cb_ctx)
10202 {
10203         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10204         struct mlx5_flow_tbl_data_entry *tbl_data =
10205                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10206         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10207
10208         return tbl_data->level != key.level ||
10209                tbl_data->id != key.id ||
10210                tbl_data->dummy != key.dummy ||
10211                tbl_data->is_transfer != !!key.is_fdb ||
10212                tbl_data->is_egress != !!key.is_egress;
10213 }
10214
10215 struct mlx5_list_entry *
10216 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10217                       void *cb_ctx)
10218 {
10219         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10220         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10221         struct mlx5_flow_tbl_data_entry *tbl_data;
10222         struct rte_flow_error *error = ctx->error;
10223         uint32_t idx = 0;
10224
10225         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10226         if (!tbl_data) {
10227                 rte_flow_error_set(error, ENOMEM,
10228                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10229                                    NULL,
10230                                    "cannot allocate flow table data entry");
10231                 return NULL;
10232         }
10233         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10234         tbl_data->idx = idx;
10235         return &tbl_data->entry;
10236 }
10237
10238 void
10239 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10240 {
10241         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10242         struct mlx5_flow_tbl_data_entry *tbl_data =
10243                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10244
10245         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10246 }
10247
10248 /**
10249  * Get a flow table.
10250  *
10251  * @param[in, out] dev
10252  *   Pointer to rte_eth_dev structure.
10253  * @param[in] table_level
10254  *   Table level to use.
10255  * @param[in] egress
10256  *   Direction of the table.
10257  * @param[in] transfer
10258  *   E-Switch or NIC flow.
 * @param[in] external
 *   True if the table is created for external (application) flows.
 * @param[in] tunnel
 *   Tunnel offload context, NULL when not used.
 * @param[in] group_id
 *   Tunnel group id.
10259  * @param[in] dummy
10260  *   Dummy entry for dv API.
10261  * @param[in] table_id
10262  *   Table id to use.
10263  * @param[out] error
10264  *   pointer to error structure.
10265  *
10266  * @return
10267  *   Returns the table resource on success, NULL in case of failure.
10268  */
10269 struct mlx5_flow_tbl_resource *
10270 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10271                          uint32_t table_level, uint8_t egress,
10272                          uint8_t transfer,
10273                          bool external,
10274                          const struct mlx5_flow_tunnel *tunnel,
10275                          uint32_t group_id, uint8_t dummy,
10276                          uint32_t table_id,
10277                          struct rte_flow_error *error)
10278 {
10279         struct mlx5_priv *priv = dev->data->dev_private;
10280         union mlx5_flow_tbl_key table_key = {
10281                 {
10282                         .level = table_level,
10283                         .id = table_id,
10284                         .reserved = 0,
10285                         .dummy = !!dummy,
10286                         .is_fdb = !!transfer,
10287                         .is_egress = !!egress,
10288                 }
10289         };
10290         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10291                 .tunnel = tunnel,
10292                 .group_id = group_id,
10293                 .external = external,
10294         };
10295         struct mlx5_flow_cb_ctx ctx = {
10296                 .dev = dev,
10297                 .error = error,
10298                 .data = &table_key.v64,
10299                 .data2 = &tt_prm,
10300         };
10301         struct mlx5_list_entry *entry;
10302         struct mlx5_flow_tbl_data_entry *tbl_data;
10303
10304         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10305         if (!entry) {
10306                 rte_flow_error_set(error, ENOMEM,
10307                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10308                                    "cannot get table");
10309                 return NULL;
10310         }
10311         DRV_LOG(DEBUG, "table_level %u table_id %u "
10312                 "tunnel %u group %u registered.",
10313                 table_level, table_id,
10314                 tunnel ? tunnel->tunnel_id : 0, group_id);
10315         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10316         return &tbl_data->tbl;
10317 }
10318
10319 void
10320 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10321 {
10322         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10323         struct mlx5_flow_tbl_data_entry *tbl_data =
10324                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10325
10326         MLX5_ASSERT(entry && sh);
10327         if (tbl_data->jump.action)
10328                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10329         if (tbl_data->tbl.obj)
10330                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10331         if (tbl_data->tunnel_offload && tbl_data->external) {
10332                 struct mlx5_list_entry *he;
10333                 struct mlx5_hlist *tunnel_grp_hash;
10334                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10335                 union tunnel_tbl_key tunnel_key = {
10336                         .tunnel_id = tbl_data->tunnel ?
10337                                         tbl_data->tunnel->tunnel_id : 0,
10338                         .group = tbl_data->group_id
10339                 };
10340                 uint32_t table_level = tbl_data->level;
10341                 struct mlx5_flow_cb_ctx ctx = {
10342                         .data = (void *)&tunnel_key.val,
10343                 };
10344
10345                 tunnel_grp_hash = tbl_data->tunnel ?
10346                                         tbl_data->tunnel->groups :
10347                                         thub->groups;
10348                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10349                 if (he)
10350                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10351                 DRV_LOG(DEBUG,
10352                         "table_level %u id %u tunnel %u group %u released.",
10353                         table_level,
10354                         tbl_data->id,
10355                         tbl_data->tunnel ?
10356                         tbl_data->tunnel->tunnel_id : 0,
10357                         tbl_data->group_id);
10358         }
10359         mlx5_list_destroy(tbl_data->matchers);
10360         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10361 }
10362
10363 /**
10364  * Release a flow table.
10365  *
10366  * @param[in] sh
10367  *   Pointer to device shared structure.
10368  * @param[in] tbl
10369  *   Table resource to be released.
10370  *
10371  * @return
10372  *   Returns 0 if the table was released, 1 otherwise.
10373  */
10374 static int
10375 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10376                              struct mlx5_flow_tbl_resource *tbl)
10377 {
10378         struct mlx5_flow_tbl_data_entry *tbl_data =
10379                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10380
10381         if (!tbl)
10382                 return 0;
10383         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10384 }
10385
10386 int
10387 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10388                          struct mlx5_list_entry *entry, void *cb_ctx)
10389 {
10390         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10391         struct mlx5_flow_dv_matcher *ref = ctx->data;
10392         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10393                                                         entry);
10394
10395         return cur->crc != ref->crc ||
10396                cur->priority != ref->priority ||
10397                memcmp((const void *)cur->mask.buf,
10398                       (const void *)ref->mask.buf, ref->mask.size);
10399 }
10400
10401 struct mlx5_list_entry *
10402 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10403 {
10404         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10405         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10406         struct mlx5_flow_dv_matcher *ref = ctx->data;
10407         struct mlx5_flow_dv_matcher *resource;
10408         struct mlx5dv_flow_matcher_attr dv_attr = {
10409                 .type = IBV_FLOW_ATTR_NORMAL,
10410                 .match_mask = (void *)&ref->mask,
10411         };
10412         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10413                                                             typeof(*tbl), tbl);
10414         int ret;
10415
10416         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10417                                SOCKET_ID_ANY);
10418         if (!resource) {
10419                 rte_flow_error_set(ctx->error, ENOMEM,
10420                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10421                                    "cannot create matcher");
10422                 return NULL;
10423         }
10424         *resource = *ref;
10425         dv_attr.match_criteria_enable =
10426                 flow_dv_matcher_enable(resource->mask.buf);
10427         __flow_dv_adjust_buf_size(&ref->mask.size,
10428                                   dv_attr.match_criteria_enable);
10429         dv_attr.priority = ref->priority;
10430         if (tbl->is_egress)
10431                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10432         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
10433                                                &resource->matcher_object);
10434         if (ret) {
10435                 mlx5_free(resource);
10436                 rte_flow_error_set(ctx->error, ENOMEM,
10437                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10438                                    "cannot create matcher");
10439                 return NULL;
10440         }
10441         return &resource->entry;
10442 }
10443
10444 /**
10445  * Register the flow matcher.
10446  *
10447  * @param[in, out] dev
10448  *   Pointer to rte_eth_dev structure.
10449  * @param[in, out] ref
10450  *   Pointer to the flow matcher reference.
10451  * @param[in, out] key
10452  *   Pointer to flow table key.
10453  * @param[in, out] dev_flow
10454  *   Pointer to the dev_flow.
10455  * @param[out] error
10456  *   pointer to error structure.
10457  *
10458  * @return
10459  *   0 on success, otherwise -errno and errno is set.
10460  */
10461 static int
10462 flow_dv_matcher_register(struct rte_eth_dev *dev,
10463                          struct mlx5_flow_dv_matcher *ref,
10464                          union mlx5_flow_tbl_key *key,
10465                          struct mlx5_flow *dev_flow,
10466                          const struct mlx5_flow_tunnel *tunnel,
10467                          uint32_t group_id,
10468                          struct rte_flow_error *error)
10469 {
10470         struct mlx5_list_entry *entry;
10471         struct mlx5_flow_dv_matcher *resource;
10472         struct mlx5_flow_tbl_resource *tbl;
10473         struct mlx5_flow_tbl_data_entry *tbl_data;
10474         struct mlx5_flow_cb_ctx ctx = {
10475                 .error = error,
10476                 .data = ref,
10477         };
10478         /*
10479          * tunnel offload API requires this registration for cases when
10480          * tunnel match rule was inserted before tunnel set rule.
10481          */
10482         tbl = flow_dv_tbl_resource_get(dev, key->level,
10483                                        key->is_egress, key->is_fdb,
10484                                        dev_flow->external, tunnel,
10485                                        group_id, 0, key->id, error);
10486         if (!tbl)
10487                 return -rte_errno;      /* No need to refill the error info */
10488         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10489         ref->tbl = tbl;
10490         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10491         if (!entry) {
10492                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10493                 return rte_flow_error_set(error, ENOMEM,
10494                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10495                                           "cannot allocate ref memory");
10496         }
10497         resource = container_of(entry, typeof(*resource), entry);
10498         dev_flow->handle->dvh.matcher = resource;
10499         return 0;
10500 }
10501
10502 struct mlx5_list_entry *
10503 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10504 {
10505         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10506         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10507         struct mlx5_flow_dv_tag_resource *entry;
10508         uint32_t idx = 0;
10509         int ret;
10510
10511         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10512         if (!entry) {
10513                 rte_flow_error_set(ctx->error, ENOMEM,
10514                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10515                                    "cannot allocate resource memory");
10516                 return NULL;
10517         }
10518         entry->idx = idx;
10519         entry->tag_id = *(uint32_t *)(ctx->data);
10520         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10521                                                   &entry->action);
10522         if (ret) {
10523                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10524                 rte_flow_error_set(ctx->error, ENOMEM,
10525                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10526                                    NULL, "cannot create action");
10527                 return NULL;
10528         }
10529         return &entry->entry;
10530 }
10531
10532 int
10533 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10534                      void *cb_ctx)
10535 {
10536         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10537         struct mlx5_flow_dv_tag_resource *tag =
10538                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10539
10540         return *(uint32_t *)(ctx->data) != tag->tag_id;
10541 }
10542
10543 struct mlx5_list_entry *
10544 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10545                      void *cb_ctx)
10546 {
10547         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10548         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10549         struct mlx5_flow_dv_tag_resource *entry;
10550         uint32_t idx = 0;
10551
10552         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10553         if (!entry) {
10554                 rte_flow_error_set(ctx->error, ENOMEM,
10555                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10556                                    "cannot allocate tag resource memory");
10557                 return NULL;
10558         }
10559         memcpy(entry, oentry, sizeof(*entry));
10560         entry->idx = idx;
10561         return &entry->entry;
10562 }
10563
10564 void
10565 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10566 {
10567         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10568         struct mlx5_flow_dv_tag_resource *tag =
10569                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10570
10571         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10572 }
10573
10574 /**
10575  * Find existing tag resource or create and register a new one.
10576  *
10577  * @param[in, out] dev
10578  *   Pointer to rte_eth_dev structure.
10579  * @param[in] tag_be24
10580  *   Tag value in big-endian byte order, right-shifted by 8 bits.
10581  * @param[in, out] dev_flow
10582  *   Pointer to the dev_flow.
10583  * @param[out] error
10584  *   pointer to error structure.
10585  *
10586  * @return
10587  *   0 on success, otherwise -errno and errno is set.
10588  */
10589 static int
10590 flow_dv_tag_resource_register
10591                         (struct rte_eth_dev *dev,
10592                          uint32_t tag_be24,
10593                          struct mlx5_flow *dev_flow,
10594                          struct rte_flow_error *error)
10595 {
10596         struct mlx5_priv *priv = dev->data->dev_private;
10597         struct mlx5_flow_dv_tag_resource *resource;
10598         struct mlx5_list_entry *entry;
10599         struct mlx5_flow_cb_ctx ctx = {
10600                                         .error = error,
10601                                         .data = &tag_be24,
10602                                         };
10603
10604         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, &ctx);
10605         if (entry) {
10606                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10607                                         entry);
10608                 dev_flow->handle->dvh.rix_tag = resource->idx;
10609                 dev_flow->dv.tag_resource = resource;
10610                 return 0;
10611         }
10612         return -rte_errno;
10613 }
10614
10615 void
10616 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10617 {
10618         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10619         struct mlx5_flow_dv_tag_resource *tag =
10620                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10621
10622         MLX5_ASSERT(tag && sh && tag->action);
10623         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10624         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10625         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10626 }
10627
10628 /**
10629  * Release the tag.
10630  *
10631  * @param dev
10632  *   Pointer to Ethernet device.
10633  * @param tag_idx
10634  *   Tag index.
10635  *
10636  * @return
10637  *   1 while a reference on it exists, 0 when freed.
10638  */
10639 static int
10640 flow_dv_tag_release(struct rte_eth_dev *dev,
10641                     uint32_t tag_idx)
10642 {
10643         struct mlx5_priv *priv = dev->data->dev_private;
10644         struct mlx5_flow_dv_tag_resource *tag;
10645
10646         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10647         if (!tag)
10648                 return 0;
10649         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10650                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10651         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10652 }
10653
10654 /**
10655  * Translate port ID action to vport.
10656  *
10657  * @param[in] dev
10658  *   Pointer to rte_eth_dev structure.
10659  * @param[in] action
10660  *   Pointer to the port ID action.
10661  * @param[out] dst_port_id
10662  *   The target port ID.
10663  * @param[out] error
10664  *   Pointer to the error structure.
10665  *
10666  * @return
10667  *   0 on success, a negative errno value otherwise and rte_errno is set.
10668  */
10669 static int
10670 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10671                                  const struct rte_flow_action *action,
10672                                  uint32_t *dst_port_id,
10673                                  struct rte_flow_error *error)
10674 {
10675         uint32_t port;
10676         struct mlx5_priv *priv;
10677         const struct rte_flow_action_port_id *conf =
10678                         (const struct rte_flow_action_port_id *)action->conf;
10679
10680         port = conf->original ? dev->data->port_id : conf->id;
10681         priv = mlx5_port_to_eswitch_info(port, false);
10682         if (!priv)
10683                 return rte_flow_error_set(error, -rte_errno,
10684                                           RTE_FLOW_ERROR_TYPE_ACTION,
10685                                           NULL,
10686                                           "No eswitch info was found for port");
10687 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10688         /*
10689          * This parameter is transferred to
10690          * mlx5dv_dr_action_create_dest_ib_port().
10691          */
10692         *dst_port_id = priv->dev_port;
10693 #else
10694         /*
10695          * Legacy mode, no LAG configuration is supported.
10696          * This parameter is transferred to
10697          * mlx5dv_dr_action_create_dest_vport().
10698          */
10699         *dst_port_id = priv->vport_id;
10700 #endif
10701         return 0;
10702 }
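/*
 * Example (application-level usage, for illustration only): a PORT_ID
 * action redirecting to DPDK port 1; the translation above resolves it
 * to an IB port or a vport depending on rdma-core capabilities:
 *
 *     struct rte_flow_action_port_id conf = { .original = 0, .id = 1 };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *             .conf = &conf,
 *     };
 */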
10703
10704 /**
10705  * Create a counter with aging configuration.
10706  *
10707  * @param[in] dev
10708  *   Pointer to rte_eth_dev structure.
10709  * @param[in] dev_flow
10710  *   Pointer to the mlx5_flow.
10711  * @param[out] count
10712  *   Pointer to the counter action configuration.
10713  * @param[in] age
10714  *   Pointer to the aging action configuration.
10715  *
10716  * @return
10717  *   Index to flow counter on success, 0 otherwise.
10718  */
10719 static uint32_t
10720 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10721                                 struct mlx5_flow *dev_flow,
10722                                 const struct rte_flow_action_count *count,
10723                                 const struct rte_flow_action_age *age)
10724 {
10725         uint32_t counter;
10726         struct mlx5_age_param *age_param;
10727
10728         if (count && count->shared)
10729                 counter = flow_dv_counter_get_shared(dev, count->id);
10730         else
10731                 counter = flow_dv_counter_alloc(dev, !!age);
10732         if (!counter || age == NULL)
10733                 return counter;
10734         age_param = flow_dv_counter_idx_get_age(dev, counter);
10735         age_param->context = age->context ? age->context :
10736                 (void *)(uintptr_t)(dev_flow->flow_idx);
10737         age_param->timeout = age->timeout;
10738         age_param->port_id = dev->data->port_id;
10739         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10740         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10741         return counter;
10742 }
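/*
 * Example (application-level usage, for illustration only): a flow
 * carrying both COUNT and AGE actions reaches this helper with a shared
 * or newly allocated counter and the aging parameters initialized:
 *
 *     struct rte_flow_action_count count = { .shared = 0, .id = 0 };
 *     struct rte_flow_action_age age = { .timeout = 10, .context = NULL };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *             { .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */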
10743
10744 /**
10745  * Add Tx queue matcher.
10746  *
10747  * @param[in] dev
10748  *   Pointer to the dev struct.
10749  * @param[in, out] matcher
10750  *   Flow matcher.
10751  * @param[in, out] key
10752  *   Flow matcher value.
10753  * @param[in] item
10754  *   Flow pattern to translate.
10757  */
10758 static void
10759 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10760                                 void *matcher, void *key,
10761                                 const struct rte_flow_item *item)
10762 {
10763         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10764         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10765         void *misc_m =
10766                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10767         void *misc_v =
10768                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10769         struct mlx5_txq_ctrl *txq;
10770         uint32_t queue;
10771
10773         queue_m = (const void *)item->mask;
10774         if (!queue_m)
10775                 return;
10776         queue_v = (const void *)item->spec;
10777         if (!queue_v)
10778                 return;
10779         txq = mlx5_txq_get(dev, queue_v->queue);
10780         if (!txq)
10781                 return;
10782         queue = txq->obj->sq->id;
10783         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10784         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10785                  queue & queue_m->queue);
10786         mlx5_txq_release(dev, queue_v->queue);
10787 }
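/*
 * Note (illustrative): this item is internal to the PMD; the Tx queue
 * index carried in queue_v->queue is resolved to the hardware SQ number
 * (txq->obj->sq->id) and matched through the source_sqn field, e.g.
 * conceptually:
 *
 *     struct mlx5_rte_flow_item_tx_queue spec = { .queue = 3 };
 *     struct mlx5_rte_flow_item_tx_queue mask = { .queue = UINT32_MAX };
 */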
10788
10789 /**
10790  * Set the hash fields according to the @p flow information.
10791  *
10792  * @param[in] dev_flow
10793  *   Pointer to the mlx5_flow.
10794  * @param[in] rss_desc
10795  *   Pointer to the mlx5_flow_rss_desc.
10796  */
10797 static void
10798 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10799                        struct mlx5_flow_rss_desc *rss_desc)
10800 {
10801         uint64_t items = dev_flow->handle->layers;
10802         int rss_inner = 0;
10803         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10804
10805         dev_flow->hash_fields = 0;
10806 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10807         if (rss_desc->level >= 2) {
10808                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10809                 rss_inner = 1;
10810         }
10811 #endif
10812         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10813             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10814                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10815                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10816                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10817                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10818                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10819                         else
10820                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10821                 }
10822         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10823                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10824                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10825                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10826                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10827                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10828                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10829                         else
10830                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10831                 }
10832         }
10833         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10834             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10835                 if (rss_types & ETH_RSS_UDP) {
10836                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10837                                 dev_flow->hash_fields |=
10838                                                 IBV_RX_HASH_SRC_PORT_UDP;
10839                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10840                                 dev_flow->hash_fields |=
10841                                                 IBV_RX_HASH_DST_PORT_UDP;
10842                         else
10843                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10844                 }
10845         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10846                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10847                 if (rss_types & ETH_RSS_TCP) {
10848                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10849                                 dev_flow->hash_fields |=
10850                                                 IBV_RX_HASH_SRC_PORT_TCP;
10851                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10852                                 dev_flow->hash_fields |=
10853                                                 IBV_RX_HASH_DST_PORT_TCP;
10854                         else
10855                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10856                 }
10857         }
10858 }
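/*
 * For instance (illustrative): with an outer IPv4 flow,
 * rss_desc->types == (ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY) yields
 * hash_fields == IBV_RX_HASH_SRC_IPV4, while plain ETH_RSS_IPV4 yields
 * MLX5_IPV4_IBV_RX_HASH (source and destination addresses). An RSS
 * level >= 2 additionally sets IBV_RX_HASH_INNER and switches the
 * layer checks to the inner headers.
 */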
10859
10860 /**
10861  * Prepare an Rx Hash queue.
10862  *
10863  * @param dev
10864  *   Pointer to Ethernet device.
10865  * @param[in] dev_flow
10866  *   Pointer to the mlx5_flow.
10867  * @param[in] rss_desc
10868  *   Pointer to the mlx5_flow_rss_desc.
10869  * @param[out] hrxq_idx
10870  *   Hash Rx queue index.
10871  *
10872  * @return
10873  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10874  */
10875 static struct mlx5_hrxq *
10876 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10877                      struct mlx5_flow *dev_flow,
10878                      struct mlx5_flow_rss_desc *rss_desc,
10879                      uint32_t *hrxq_idx)
10880 {
10881         struct mlx5_priv *priv = dev->data->dev_private;
10882         struct mlx5_flow_handle *dh = dev_flow->handle;
10883         struct mlx5_hrxq *hrxq;
10884
10885         MLX5_ASSERT(rss_desc->queue_num);
10886         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10887         rss_desc->hash_fields = dev_flow->hash_fields;
10888         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10889         rss_desc->shared_rss = 0;
10890         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10891         if (!*hrxq_idx)
10892                 return NULL;
10893         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10894                               *hrxq_idx);
10895         return hrxq;
10896 }
10897
10898 /**
10899  * Release sample sub action resource.
10900  *
10901  * @param[in, out] dev
10902  *   Pointer to rte_eth_dev structure.
10903  * @param[in] act_res
10904  *   Pointer to sample sub action resource.
10905  */
10906 static void
10907 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10908                                    struct mlx5_flow_sub_actions_idx *act_res)
10909 {
10910         if (act_res->rix_hrxq) {
10911                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10912                 act_res->rix_hrxq = 0;
10913         }
10914         if (act_res->rix_encap_decap) {
10915                 flow_dv_encap_decap_resource_release(dev,
10916                                                      act_res->rix_encap_decap);
10917                 act_res->rix_encap_decap = 0;
10918         }
10919         if (act_res->rix_port_id_action) {
10920                 flow_dv_port_id_action_resource_release(dev,
10921                                                 act_res->rix_port_id_action);
10922                 act_res->rix_port_id_action = 0;
10923         }
10924         if (act_res->rix_tag) {
10925                 flow_dv_tag_release(dev, act_res->rix_tag);
10926                 act_res->rix_tag = 0;
10927         }
10928         if (act_res->rix_jump) {
10929                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10930                 act_res->rix_jump = 0;
10931         }
10932 }
10933
10934 int
10935 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
10936                         struct mlx5_list_entry *entry, void *cb_ctx)
10937 {
10938         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10939         struct rte_eth_dev *dev = ctx->dev;
10940         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
10941         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
10942                                                               typeof(*resource),
10943                                                               entry);
10944
10945         if (ctx_resource->ratio == resource->ratio &&
10946             ctx_resource->ft_type == resource->ft_type &&
10947             ctx_resource->ft_id == resource->ft_id &&
10948             ctx_resource->set_action == resource->set_action &&
10949             !memcmp((void *)&ctx_resource->sample_act,
10950                     (void *)&resource->sample_act,
10951                     sizeof(struct mlx5_flow_sub_actions_list))) {
10952                 /*
10953                  * A matching sample action already exists, so release
10954                  * the sub-action references prepared for this request.
10955                  */
10956                 flow_dv_sample_sub_actions_release(dev,
10957                                                    &ctx_resource->sample_idx);
10958                 return 0;
10959         }
10960         return 1;
10961 }
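/*
 * Note (illustrative): per the mlx5_list match callback convention,
 * returning 0 reports a match, so the list reuses the existing entry
 * and the sub-action references prepared for the new request are
 * released above; any non-zero value means "no match" and the search
 * continues, possibly ending in flow_dv_sample_create_cb().
 */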
10962
10963 struct mlx5_list_entry *
10964 flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
10965 {
10966         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10967         struct rte_eth_dev *dev = ctx->dev;
10968         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
10969         void **sample_dv_actions = ctx_resource->sub_actions;
10970         struct mlx5_flow_dv_sample_resource *resource;
10971         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
10972         struct mlx5_priv *priv = dev->data->dev_private;
10973         struct mlx5_dev_ctx_shared *sh = priv->sh;
10974         struct mlx5_flow_tbl_resource *tbl;
10975         uint32_t idx = 0;
10976         const uint32_t next_ft_step = 1;
10977         uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
10978         uint8_t is_egress = 0;
10979         uint8_t is_transfer = 0;
10980         struct rte_flow_error *error = ctx->error;
10981
10982         /* Register new sample resource. */
10983         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10984         if (!resource) {
10985                 rte_flow_error_set(error, ENOMEM,
10986                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10987                                           NULL,
10988                                           "cannot allocate resource memory");
10989                 return NULL;
10990         }
10991         *resource = *ctx_resource;
10992         /* Create normal path table level */
10993         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10994                 is_transfer = 1;
10995         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
10996                 is_egress = 1;
10997         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
10998                                         is_egress, is_transfer,
10999                                         true, NULL, 0, 0, 0, error);
11000         if (!tbl) {
11001                 rte_flow_error_set(error, ENOMEM,
11002                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11003                                           NULL,
11004                                           "failed to create normal path table "
11005                                           "for sample");
11006                 goto error;
11007         }
11008         resource->normal_path_tbl = tbl;
11009         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11010                 if (!sh->default_miss_action) {
11011                         rte_flow_error_set(error, ENOMEM,
11012                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11013                                                 NULL,
11014                                                 "default miss action was not "
11015                                                 "created");
11016                         goto error;
11017                 }
11018                 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11019                                                 sh->default_miss_action;
11020         }
11021         /* Create a DR sample action */
11022         sampler_attr.sample_ratio = resource->ratio;
11023         sampler_attr.default_next_table = tbl->obj;
11024         sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11025         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11026                                                         &sample_dv_actions[0];
11027         sampler_attr.action = resource->set_action;
11028         if (mlx5_os_flow_dr_create_flow_action_sampler
11029                         (&sampler_attr, &resource->verbs_action)) {
11030                 rte_flow_error_set(error, ENOMEM,
11031                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11032                                         NULL, "cannot create sample action");
11033                 goto error;
11034         }
11035         resource->idx = idx;
11036         resource->dev = dev;
11037         return &resource->entry;
11038 error:
11039         if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11040                 flow_dv_sample_sub_actions_release(dev,
11041                                                    &resource->sample_idx);
11042         if (resource->normal_path_tbl)
11043                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11044                                 resource->normal_path_tbl);
11045         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11046         return NULL;
11048 }
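/*
 * Background (illustrative): in the sampler attribute filled above,
 * sample_ratio == N means roughly one of every N packets takes the
 * sample_actions path, while every packet continues to
 * default_next_table, the "normal path" table created one level below
 * the current one (next_ft_id = ft_id + 1).
 */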
11049
11050 struct mlx5_list_entry *
11051 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11052                          struct mlx5_list_entry *entry __rte_unused,
11053                          void *cb_ctx)
11054 {
11055         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11056         struct rte_eth_dev *dev = ctx->dev;
11057         struct mlx5_flow_dv_sample_resource *resource;
11058         struct mlx5_priv *priv = dev->data->dev_private;
11059         struct mlx5_dev_ctx_shared *sh = priv->sh;
11060         uint32_t idx = 0;
11061
11062         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11063         if (!resource) {
11064                 rte_flow_error_set(ctx->error, ENOMEM,
11065                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11066                                           NULL,
11067                                           "cannot allocate resource memory");
11068                 return NULL;
11069         }
11070         memcpy(resource, entry, sizeof(*resource));
11071         resource->idx = idx;
11072         resource->dev = dev;
11073         return &resource->entry;
11074 }
11075
11076 void
11077 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11078                              struct mlx5_list_entry *entry)
11079 {
11080         struct mlx5_flow_dv_sample_resource *resource =
11081                                   container_of(entry, typeof(*resource), entry);
11082         struct rte_eth_dev *dev = resource->dev;
11083         struct mlx5_priv *priv = dev->data->dev_private;
11084
11085         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11086 }
11087
11088 /**
11089  * Find existing sample resource or create and register a new one.
11090  *
11091  * @param[in, out] dev
11092  *   Pointer to rte_eth_dev structure.
11093  * @param[in] ref
11094  *   Pointer to sample resource reference.
11095  * @param[in, out] dev_flow
11096  *   Pointer to the dev_flow.
11097  * @param[out] error
11098  *   Pointer to the error structure.
11099  *
11100  * @return
11101  *   0 on success, a negative errno value otherwise and rte_errno is set.
11102  */
11103 static int
11104 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11105                          struct mlx5_flow_dv_sample_resource *ref,
11106                          struct mlx5_flow *dev_flow,
11107                          struct rte_flow_error *error)
11108 {
11109         struct mlx5_flow_dv_sample_resource *resource;
11110         struct mlx5_list_entry *entry;
11111         struct mlx5_priv *priv = dev->data->dev_private;
11112         struct mlx5_flow_cb_ctx ctx = {
11113                 .dev = dev,
11114                 .error = error,
11115                 .data = ref,
11116         };
11117
11118         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11119         if (!entry)
11120                 return -rte_errno;
11121         resource = container_of(entry, typeof(*resource), entry);
11122         dev_flow->handle->dvh.rix_sample = resource->idx;
11123         dev_flow->dv.sample_res = resource;
11124         return 0;
11125 }
11126
11127 int
11128 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11129                             struct mlx5_list_entry *entry, void *cb_ctx)
11130 {
11131         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11132         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11133         struct rte_eth_dev *dev = ctx->dev;
11134         struct mlx5_flow_dv_dest_array_resource *resource =
11135                                   container_of(entry, typeof(*resource), entry);
11136         uint32_t idx = 0;
11137
11138         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11139             ctx_resource->ft_type == resource->ft_type &&
11140             !memcmp((void *)resource->sample_act,
11141                     (void *)ctx_resource->sample_act,
11142                    (ctx_resource->num_of_dest *
11143                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11144                 /*
11145                  * A matching destination array already exists, so release
11146                  * the sub-action references prepared for this request.
11147                  */
11148                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11149                         flow_dv_sample_sub_actions_release(dev,
11150                                         &ctx_resource->sample_idx[idx]);
11151                 return 0;
11152         }
11153         return 1;
11154 }
11155
11156 struct mlx5_list_entry *
11157 flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11158 {
11159         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11160         struct rte_eth_dev *dev = ctx->dev;
11161         struct mlx5_flow_dv_dest_array_resource *resource;
11162         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11163         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11164         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11165         struct mlx5_priv *priv = dev->data->dev_private;
11166         struct mlx5_dev_ctx_shared *sh = priv->sh;
11167         struct mlx5_flow_sub_actions_list *sample_act;
11168         struct mlx5dv_dr_domain *domain;
11169         uint32_t idx = 0, res_idx = 0;
11170         struct rte_flow_error *error = ctx->error;
11171         uint64_t action_flags;
11172         int ret;
11173
11174         /* Register new destination array resource. */
11175         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11176                                             &res_idx);
11177         if (!resource) {
11178                 rte_flow_error_set(error, ENOMEM,
11179                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11180                                           NULL,
11181                                           "cannot allocate resource memory");
11182                 return NULL;
11183         }
11184         *resource = *ctx_resource;
11185         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11186                 domain = sh->fdb_domain;
11187         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11188                 domain = sh->rx_domain;
11189         else
11190                 domain = sh->tx_domain;
11191         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11192                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11193                                  mlx5_malloc(MLX5_MEM_ZERO,
11194                                  sizeof(struct mlx5dv_dr_action_dest_attr),
11195                                  0, SOCKET_ID_ANY);
11196                 if (!dest_attr[idx]) {
11197                         rte_flow_error_set(error, ENOMEM,
11198                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11199                                            NULL,
11200                                            "cannot allocate resource memory");
11201                         goto error;
11202                 }
11203                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11204                 sample_act = &ctx_resource->sample_act[idx];
11205                 action_flags = sample_act->action_flags;
11206                 switch (action_flags) {
11207                 case MLX5_FLOW_ACTION_QUEUE:
11208                         dest_attr[idx]->dest = sample_act->dr_queue_action;
11209                         break;
11210                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11211                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11212                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11213                         dest_attr[idx]->dest_reformat->reformat =
11214                                         sample_act->dr_encap_action;
11215                         dest_attr[idx]->dest_reformat->dest =
11216                                         sample_act->dr_port_id_action;
11217                         break;
11218                 case MLX5_FLOW_ACTION_PORT_ID:
11219                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
11220                         break;
11221                 case MLX5_FLOW_ACTION_JUMP:
11222                         dest_attr[idx]->dest = sample_act->dr_jump_action;
11223                         break;
11224                 default:
11225                         rte_flow_error_set(error, EINVAL,
11226                                            RTE_FLOW_ERROR_TYPE_ACTION,
11227                                            NULL,
11228                                            "unsupported action type");
11229                         goto error;
11230                 }
11231         }
11232         /* Create a dest array action. */
11233         ret = mlx5_os_flow_dr_create_flow_action_dest_array
11234                                                 (domain,
11235                                                  resource->num_of_dest,
11236                                                  dest_attr,
11237                                                  &resource->action);
11238         if (ret) {
11239                 rte_flow_error_set(error, ENOMEM,
11240                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11241                                    NULL,
11242                                    "cannot create destination array action");
11243                 goto error;
11244         }
11245         resource->idx = res_idx;
11246         resource->dev = dev;
11247         for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11248                 mlx5_free(dest_attr[idx]);
11249         return &resource->entry;
11250 error:
11251         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11252                 flow_dv_sample_sub_actions_release(dev,
11253                                                    &resource->sample_idx[idx]);
11254                 if (dest_attr[idx])
11255                         mlx5_free(dest_attr[idx]);
11256         }
11257         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11258         return NULL;
11259 }
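/*
 * Background (illustrative): each mirrored destination above becomes one
 * mlx5dv_dr_action_dest_attr entry; a plain QUEUE/PORT_ID/JUMP fate maps
 * to MLX5DV_DR_ACTION_DEST, while PORT_ID combined with ENCAP maps to
 * MLX5DV_DR_ACTION_DEST_REFORMAT so the packet is re-encapsulated before
 * leaving through that destination.
 */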
11260
11261 struct mlx5_list_entry *
11262 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11263                             struct mlx5_list_entry *entry __rte_unused,
11264                             void *cb_ctx)
11265 {
11266         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11267         struct rte_eth_dev *dev = ctx->dev;
11268         struct mlx5_flow_dv_dest_array_resource *resource;
11269         struct mlx5_priv *priv = dev->data->dev_private;
11270         struct mlx5_dev_ctx_shared *sh = priv->sh;
11271         uint32_t res_idx = 0;
11272         struct rte_flow_error *error = ctx->error;
11273
11274         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11275                                       &res_idx);
11276         if (!resource) {
11277                 rte_flow_error_set(error, ENOMEM,
11278                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11279                                           NULL,
11280                                           "cannot allocate dest-array memory");
11281                 return NULL;
11282         }
11283         memcpy(resource, entry, sizeof(*resource));
11284         resource->idx = res_idx;
11285         resource->dev = dev;
11286         return &resource->entry;
11287 }
11288
11289 void
11290 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11291                                  struct mlx5_list_entry *entry)
11292 {
11293         struct mlx5_flow_dv_dest_array_resource *resource =
11294                         container_of(entry, typeof(*resource), entry);
11295         struct rte_eth_dev *dev = resource->dev;
11296         struct mlx5_priv *priv = dev->data->dev_private;
11297
11298         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11299 }
11300
11301 /**
11302  * Find existing destination array resource or create and register a new one.
11303  *
11304  * @param[in, out] dev
11305  *   Pointer to rte_eth_dev structure.
11306  * @param[in] ref
11307  *   Pointer to destination array resource reference.
11308  * @param[in, out] dev_flow
11309  *   Pointer to the dev_flow.
11310  * @param[out] error
11311  *   Pointer to the error structure.
11312  *
11313  * @return
11314  *   0 on success, a negative errno value otherwise and rte_errno is set.
11315  */
11316 static int
11317 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11318                          struct mlx5_flow_dv_dest_array_resource *ref,
11319                          struct mlx5_flow *dev_flow,
11320                          struct rte_flow_error *error)
11321 {
11322         struct mlx5_flow_dv_dest_array_resource *resource;
11323         struct mlx5_priv *priv = dev->data->dev_private;
11324         struct mlx5_list_entry *entry;
11325         struct mlx5_flow_cb_ctx ctx = {
11326                 .dev = dev,
11327                 .error = error,
11328                 .data = ref,
11329         };
11330
11331         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11332         if (!entry)
11333                 return -rte_errno;
11334         resource = container_of(entry, typeof(*resource), entry);
11335         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11336         dev_flow->dv.dest_array_res = resource;
11337         return 0;
11338 }
11339
11340 /**
11341  * Convert Sample action to DV specification.
11342  *
11343  * @param[in] dev
11344  *   Pointer to rte_eth_dev structure.
11345  * @param[in] action
11346  *   Pointer to sample action structure.
11347  * @param[in, out] dev_flow
11348  *   Pointer to the mlx5_flow.
11349  * @param[in] attr
11350  *   Pointer to the flow attributes.
11351  * @param[in, out] num_of_dest
11352  *   Pointer to the number of destinations.
11353  * @param[in, out] sample_actions
11354  *   Pointer to sample actions list.
11355  * @param[in, out] res
11356  *   Pointer to sample resource.
11357  * @param[out] error
11358  *   Pointer to the error structure.
11359  *
11360  * @return
11361  *   0 on success, a negative errno value otherwise and rte_errno is set.
11362  */
11363 static int
11364 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11365                                 const struct rte_flow_action_sample *action,
11366                                 struct mlx5_flow *dev_flow,
11367                                 const struct rte_flow_attr *attr,
11368                                 uint32_t *num_of_dest,
11369                                 void **sample_actions,
11370                                 struct mlx5_flow_dv_sample_resource *res,
11371                                 struct rte_flow_error *error)
11372 {
11373         struct mlx5_priv *priv = dev->data->dev_private;
11374         const struct rte_flow_action *sub_actions;
11375         struct mlx5_flow_sub_actions_list *sample_act;
11376         struct mlx5_flow_sub_actions_idx *sample_idx;
11377         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11378         struct rte_flow *flow = dev_flow->flow;
11379         struct mlx5_flow_rss_desc *rss_desc;
11380         uint64_t action_flags = 0;
11381
11382         MLX5_ASSERT(wks);
11383         rss_desc = &wks->rss_desc;
11384         sample_act = &res->sample_act;
11385         sample_idx = &res->sample_idx;
11386         res->ratio = action->ratio;
11387         sub_actions = action->actions;
11388         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11389                 int type = sub_actions->type;
11390                 uint32_t pre_rix = 0;
11391                 void *pre_r;
11392                 switch (type) {
11393                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11394                 {
11395                         const struct rte_flow_action_queue *queue;
11396                         struct mlx5_hrxq *hrxq;
11397                         uint32_t hrxq_idx;
11398
11399                         queue = sub_actions->conf;
11400                         rss_desc->queue_num = 1;
11401                         rss_desc->queue[0] = queue->index;
11402                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11403                                                     rss_desc, &hrxq_idx);
11404                         if (!hrxq)
11405                                 return rte_flow_error_set
11406                                         (error, rte_errno,
11407                                          RTE_FLOW_ERROR_TYPE_ACTION,
11408                                          NULL,
11409                                          "cannot create fate queue");
11410                         sample_act->dr_queue_action = hrxq->action;
11411                         sample_idx->rix_hrxq = hrxq_idx;
11412                         sample_actions[sample_act->actions_num++] =
11413                                                 hrxq->action;
11414                         (*num_of_dest)++;
11415                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11416                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11417                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11418                         dev_flow->handle->fate_action =
11419                                         MLX5_FLOW_FATE_QUEUE;
11420                         break;
11421                 }
11422                 case RTE_FLOW_ACTION_TYPE_RSS:
11423                 {
11424                         struct mlx5_hrxq *hrxq;
11425                         uint32_t hrxq_idx;
11426                         const struct rte_flow_action_rss *rss;
11427                         const uint8_t *rss_key;
11428
11429                         rss = sub_actions->conf;
11430                         memcpy(rss_desc->queue, rss->queue,
11431                                rss->queue_num * sizeof(uint16_t));
11432                         rss_desc->queue_num = rss->queue_num;
11433                         /* NULL RSS key indicates default RSS key. */
11434                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11435                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11436                         /*
11437                          * rss->level and rss->types should be set in advance
11438                          * when expanding items for RSS.
11439                          */
11440                         flow_dv_hashfields_set(dev_flow, rss_desc);
11441                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11442                                                     rss_desc, &hrxq_idx);
11443                         if (!hrxq)
11444                                 return rte_flow_error_set
11445                                         (error, rte_errno,
11446                                          RTE_FLOW_ERROR_TYPE_ACTION,
11447                                          NULL,
11448                                          "cannot create fate queue");
11449                         sample_act->dr_queue_action = hrxq->action;
11450                         sample_idx->rix_hrxq = hrxq_idx;
11451                         sample_actions[sample_act->actions_num++] =
11452                                                 hrxq->action;
11453                         (*num_of_dest)++;
11454                         action_flags |= MLX5_FLOW_ACTION_RSS;
11455                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11456                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11457                         dev_flow->handle->fate_action =
11458                                         MLX5_FLOW_FATE_QUEUE;
11459                         break;
11460                 }
11461                 case RTE_FLOW_ACTION_TYPE_MARK:
11462                 {
11463                         uint32_t tag_be = mlx5_flow_mark_set
11464                                 (((const struct rte_flow_action_mark *)
11465                                 (sub_actions->conf))->id);
11466
11467                         dev_flow->handle->mark = 1;
11468                         pre_rix = dev_flow->handle->dvh.rix_tag;
11469                         /* Save the mark resource before sample */
11470                         pre_r = dev_flow->dv.tag_resource;
11471                         if (flow_dv_tag_resource_register(dev, tag_be,
11472                                                   dev_flow, error))
11473                                 return -rte_errno;
11474                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11475                         sample_act->dr_tag_action =
11476                                 dev_flow->dv.tag_resource->action;
11477                         sample_idx->rix_tag =
11478                                 dev_flow->handle->dvh.rix_tag;
11479                         sample_actions[sample_act->actions_num++] =
11480                                                 sample_act->dr_tag_action;
11481                         /* Recover the mark resource after sample */
11482                         dev_flow->dv.tag_resource = pre_r;
11483                         dev_flow->handle->dvh.rix_tag = pre_rix;
11484                         action_flags |= MLX5_FLOW_ACTION_MARK;
11485                         break;
11486                 }
11487                 case RTE_FLOW_ACTION_TYPE_COUNT:
11488                 {
11489                         if (!flow->counter) {
11490                                 flow->counter =
11491                                         flow_dv_translate_create_counter(dev,
11492                                                 dev_flow, sub_actions->conf,
11493                                                 NULL);
11494                                 if (!flow->counter)
11495                                         return rte_flow_error_set
11496                                                 (error, rte_errno,
11497                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11498                                                 NULL,
11499                                                 "cannot create counter"
11500                                                 " object.");
11501                         }
11502                         sample_act->dr_cnt_action =
11503                                   (flow_dv_counter_get_by_idx(dev,
11504                                   flow->counter, NULL))->action;
11505                         sample_actions[sample_act->actions_num++] =
11506                                                 sample_act->dr_cnt_action;
11507                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11508                         break;
11509                 }
11510                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11511                 {
11512                         struct mlx5_flow_dv_port_id_action_resource
11513                                         port_id_resource;
11514                         uint32_t port_id = 0;
11515
11516                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11517                         /* Save the port id resource before sample */
11518                         pre_rix = dev_flow->handle->rix_port_id_action;
11519                         pre_r = dev_flow->dv.port_id_action;
11520                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11521                                                              &port_id, error))
11522                                 return -rte_errno;
11523                         port_id_resource.port_id = port_id;
11524                         if (flow_dv_port_id_action_resource_register
11525                             (dev, &port_id_resource, dev_flow, error))
11526                                 return -rte_errno;
11527                         sample_act->dr_port_id_action =
11528                                 dev_flow->dv.port_id_action->action;
11529                         sample_idx->rix_port_id_action =
11530                                 dev_flow->handle->rix_port_id_action;
11531                         sample_actions[sample_act->actions_num++] =
11532                                                 sample_act->dr_port_id_action;
11533                         /* Recover the port id resource after sample */
11534                         dev_flow->dv.port_id_action = pre_r;
11535                         dev_flow->handle->rix_port_id_action = pre_rix;
11536                         (*num_of_dest)++;
11537                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11538                         break;
11539                 }
11540                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11541                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11542                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11543                         /* Save the encap resource before sample */
11544                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11545                         pre_r = dev_flow->dv.encap_decap;
11546                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11547                                                            dev_flow,
11548                                                            attr->transfer,
11549                                                            error))
11550                                 return -rte_errno;
11551                         sample_act->dr_encap_action =
11552                                 dev_flow->dv.encap_decap->action;
11553                         sample_idx->rix_encap_decap =
11554                                 dev_flow->handle->dvh.rix_encap_decap;
11555                         sample_actions[sample_act->actions_num++] =
11556                                                 sample_act->dr_encap_action;
11557                         /* Recover the encap resource after sample */
11558                         dev_flow->dv.encap_decap = pre_r;
11559                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11560                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11561                         break;
11562                 default:
11563                         return rte_flow_error_set(error, EINVAL,
11564                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11565                                 NULL,
11566                                 "unsupported action type for sampler");
11567                 }
11568         }
11569         sample_act->action_flags = action_flags;
11570         res->ft_id = dev_flow->dv.group;
11571         if (attr->transfer) {
11572                 union {
11573                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11574                         uint64_t set_action;
11575                 } action_ctx = { .set_action = 0 };
11576
11577                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11578                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11579                          MLX5_MODIFICATION_TYPE_SET);
11580                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11581                          MLX5_MODI_META_REG_C_0);
11582                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11583                          priv->vport_meta_tag);
11584                 res->set_action = action_ctx.set_action;
11585         } else if (attr->ingress) {
11586                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11587         } else {
11588                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11589         }
11590         return 0;
11591 }
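/*
 * Example (application-level usage, for illustration only): sample every
 * second packet to queue 0; the loop above converts each sub-action into
 * a DR action on the sample path:
 *
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action sub_acts[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action_sample sample = {
 *             .ratio = 2,        one of every two packets is sampled
 *             .actions = sub_acts,
 *     };
 */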
11592
11593 /**
11594  * Create and register the sample or destination array resource.
11595  *
11596  * @param[in] dev
11597  *   Pointer to rte_eth_dev structure.
11598  * @param[in, out] dev_flow
11599  *   Pointer to the mlx5_flow.
11600  * @param[in] num_of_dest
11601  *   The number of destinations.
11602  * @param[in, out] res
11603  *   Pointer to sample resource.
11604  * @param[in, out] mdest_res
11605  *   Pointer to destination array resource.
11606  * @param[in] sample_actions
11607  *   Pointer to sample path actions list.
11608  * @param[in] action_flags
11609  *   Holds the actions detected until now.
11610  * @param[out] error
11611  *   Pointer to the error structure.
11612  *
11613  * @return
11614  *   0 on success, a negative errno value otherwise and rte_errno is set.
11615  */
11616 static int
11617 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11618                              struct mlx5_flow *dev_flow,
11619                              uint32_t num_of_dest,
11620                              struct mlx5_flow_dv_sample_resource *res,
11621                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11622                              void **sample_actions,
11623                              uint64_t action_flags,
11624                              struct rte_flow_error *error)
11625 {
11626         /* update normal path action resource into last index of array */
11627         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11628         struct mlx5_flow_sub_actions_list *sample_act =
11629                                         &mdest_res->sample_act[dest_index];
11630         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11631         struct mlx5_flow_rss_desc *rss_desc;
11632         uint32_t normal_idx = 0;
11633         struct mlx5_hrxq *hrxq;
11634         uint32_t hrxq_idx;
11635
11636         MLX5_ASSERT(wks);
11637         rss_desc = &wks->rss_desc;
11638         if (num_of_dest > 1) {
11639                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11640                         /* Handle QP action for mirroring */
11641                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11642                                                     rss_desc, &hrxq_idx);
11643                         if (!hrxq)
11644                                 return rte_flow_error_set
11645                                      (error, rte_errno,
11646                                       RTE_FLOW_ERROR_TYPE_ACTION,
11647                                       NULL,
11648                                       "cannot create rx queue");
11649                         normal_idx++;
11650                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11651                         sample_act->dr_queue_action = hrxq->action;
11652                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11653                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11654                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11655                 }
11656                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11657                         normal_idx++;
11658                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11659                                 dev_flow->handle->dvh.rix_encap_decap;
11660                         sample_act->dr_encap_action =
11661                                 dev_flow->dv.encap_decap->action;
11662                         dev_flow->handle->dvh.rix_encap_decap = 0;
11663                 }
11664                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11665                         normal_idx++;
11666                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11667                                 dev_flow->handle->rix_port_id_action;
11668                         sample_act->dr_port_id_action =
11669                                 dev_flow->dv.port_id_action->action;
11670                         dev_flow->handle->rix_port_id_action = 0;
11671                 }
11672                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11673                         normal_idx++;
11674                         mdest_res->sample_idx[dest_index].rix_jump =
11675                                 dev_flow->handle->rix_jump;
11676                         sample_act->dr_jump_action =
11677                                 dev_flow->dv.jump->action;
11678                         dev_flow->handle->rix_jump = 0;
11679                 }
11680                 sample_act->actions_num = normal_idx;
11681                 /* update sample action resource into first index of array */
11682                 mdest_res->ft_type = res->ft_type;
11683                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11684                                 sizeof(struct mlx5_flow_sub_actions_idx));
11685                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11686                                 sizeof(struct mlx5_flow_sub_actions_list));
11687                 mdest_res->num_of_dest = num_of_dest;
11688                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11689                                                          dev_flow, error))
11690                         return rte_flow_error_set(error, EINVAL,
11691                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11692                                                   NULL, "can't create sample "
11693                                                   "action");
11694         } else {
11695                 res->sub_actions = sample_actions;
11696                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11697                         return rte_flow_error_set(error, EINVAL,
11698                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11699                                                   NULL,
11700                                                   "can't create sample action");
11701         }
11702         return 0;
11703 }
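/*
 * Summary (illustrative): with a single destination the plain sampler
 * resource is registered; with multiple destinations (mirroring) the
 * normal-path fate action is moved into the last slot of the array and
 * a destination array resource is registered instead, with the sample
 * path occupying index 0.
 */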
11704
11705 /**
11706  * Remove an ASO age action from age actions list.
11707  *
11708  * @param[in] dev
11709  *   Pointer to the Ethernet device structure.
11710  * @param[in] age
11711  *   Pointer to the aso age action handler.
11712  */
11713 static void
11714 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11715                                 struct mlx5_aso_age_action *age)
11716 {
11717         struct mlx5_age_info *age_info;
11718         struct mlx5_age_param *age_param = &age->age_params;
11719         struct mlx5_priv *priv = dev->data->dev_private;
11720         uint16_t expected = AGE_CANDIDATE;
11721
11722         age_info = GET_PORT_AGE_INFO(priv);
11723         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11724                                          AGE_FREE, false, __ATOMIC_RELAXED,
11725                                          __ATOMIC_RELAXED)) {
11726                 /*
11727                  * We need the lock even if the age timeout expired,
11728                  * since the age action may still be in process.
11729                  */
11730                 rte_spinlock_lock(&age_info->aged_sl);
11731                 LIST_REMOVE(age, next);
11732                 rte_spinlock_unlock(&age_info->aged_sl);
11733                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11734         }
11735 }
11736
11737 /**
11738  * Release an ASO age action.
11739  *
11740  * @param[in] dev
11741  *   Pointer to the Ethernet device structure.
11742  * @param[in] age_idx
11743  *   Index of ASO age action to release.
11747  *
11748  * @return
11749  *   0 when age action was removed, otherwise the number of references.
11750  */
11751 static int
11752 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11753 {
11754         struct mlx5_priv *priv = dev->data->dev_private;
11755         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11756         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11757         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11758
11759         if (!ret) {
11760                 flow_dv_aso_age_remove_from_age(dev, age);
11761                 rte_spinlock_lock(&mng->free_sl);
11762                 LIST_INSERT_HEAD(&mng->free, age, next);
11763                 rte_spinlock_unlock(&mng->free_sl);
11764         }
11765         return ret;
11766 }
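/*
 * Note (illustrative): ASO age actions are pre-allocated per pool, so a
 * fully released action is not freed back to the system; it returns to
 * mng->free for reuse by the next flow_dv_aso_age_alloc() call.
 */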
11767
11768 /**
11769  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11770  *
11771  * @param[in] dev
11772  *   Pointer to the Ethernet device structure.
11773  *
11774  * @return
11775  *   0 on success, otherwise negative errno value and rte_errno is set.
11776  */
11777 static int
11778 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11779 {
11780         struct mlx5_priv *priv = dev->data->dev_private;
11781         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11782         void *old_pools = mng->pools;
11783         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11784         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11785         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11786
11787         if (!pools) {
11788                 rte_errno = ENOMEM;
11789                 return -ENOMEM;
11790         }
11791         if (old_pools) {
11792                 memcpy(pools, old_pools,
11793                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11794                 mlx5_free(old_pools);
11795         } else {
11796                 /* First ASO flow hit allocation - starting ASO data-path. */
11797                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11798
11799                 if (ret) {
11800                         mlx5_free(pools);
11801                         return ret;
11802                 }
11803         }
11804         mng->n = resize;
11805         mng->pools = pools;
11806         return 0;
11807 }
11808
11809 /**
11810  * Create and initialize a new ASO aging pool.
11811  *
11812  * @param[in] dev
11813  *   Pointer to the Ethernet device structure.
11814  * @param[out] age_free
11815  *   Where to put the pointer of a new age action.
11816  *
11817  * @return
11818  *   The age actions pool pointer and @p age_free is set on success,
11819  *   NULL otherwise and rte_errno is set.
11820  */
11821 static struct mlx5_aso_age_pool *
11822 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11823                         struct mlx5_aso_age_action **age_free)
11824 {
11825         struct mlx5_priv *priv = dev->data->dev_private;
11826         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11827         struct mlx5_aso_age_pool *pool = NULL;
11828         struct mlx5_devx_obj *obj = NULL;
11829         uint32_t i;
11830
11831         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11832                                                     priv->sh->pdn);
11833         if (!obj) {
11834                 rte_errno = ENODATA;
11835                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11836                 return NULL;
11837         }
11838         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11839         if (!pool) {
11840                 claim_zero(mlx5_devx_cmd_destroy(obj));
11841                 rte_errno = ENOMEM;
11842                 return NULL;
11843         }
11844         pool->flow_hit_aso_obj = obj;
11845         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11846         rte_spinlock_lock(&mng->resize_sl);
11847         pool->index = mng->next;
11848         /* Resize pools array if there is no room for the new pool in it. */
11849         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11850                 claim_zero(mlx5_devx_cmd_destroy(obj));
11851                 mlx5_free(pool);
11852                 rte_spinlock_unlock(&mng->resize_sl);
11853                 return NULL;
11854         }
11855         mng->pools[pool->index] = pool;
11856         mng->next++;
11857         rte_spinlock_unlock(&mng->resize_sl);
11858         /* Assign the first action in the new pool, the rest go to free list. */
11859         *age_free = &pool->actions[0];
11860         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11861                 pool->actions[i].offset = i;
11862                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11863         }
11864         return pool;
11865 }
11866
11867 /**
11868  * Allocate an ASO aging bit.
11869  *
11870  * @param[in] dev
11871  *   Pointer to the Ethernet device structure.
11872  * @param[out] error
11873  *   Pointer to the error structure.
11874  *
11875  * @return
11876  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11877  */
11878 static uint32_t
11879 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11880 {
11881         struct mlx5_priv *priv = dev->data->dev_private;
11882         const struct mlx5_aso_age_pool *pool;
11883         struct mlx5_aso_age_action *age_free = NULL;
11884         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11885
11886         MLX5_ASSERT(mng);
11887         /* Try to get the next free age action bit. */
11888         rte_spinlock_lock(&mng->free_sl);
11889         age_free = LIST_FIRST(&mng->free);
11890         if (age_free) {
11891                 LIST_REMOVE(age_free, next);
11892         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11893                 rte_spinlock_unlock(&mng->free_sl);
11894                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11895                                    NULL, "failed to create ASO age pool");
11896                 return 0; /* 0 is an error. */
11897         }
11898         rte_spinlock_unlock(&mng->free_sl);
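              /*
               * Recover the enclosing pool: step back from the action by its
               * offset to the start of the actions[] array, then take the
               * container of that array.
               */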
11899         pool = container_of
11900           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11901                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11902                                                                        actions);
11903         if (!age_free->dr_action) {
11904                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11905                                                  error);
11906
11907                 if (reg_c < 0) {
                              /* Put the action back on the free list on failure. */
                              rte_spinlock_lock(&mng->free_sl);
                              LIST_INSERT_HEAD(&mng->free, age_free, next);
                              rte_spinlock_unlock(&mng->free_sl);
11908                         rte_flow_error_set(error, rte_errno,
11909                                            RTE_FLOW_ERROR_TYPE_ACTION,
11910                                            NULL, "failed to get reg_c "
11911                                            "for ASO flow hit");
11912                         return 0; /* 0 is an error. */
11913                 }
11914 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11915                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11916                                 (priv->sh->rx_domain,
11917                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11918                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11919                                  (reg_c - REG_C_0));
11920 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11921                 if (!age_free->dr_action) {
11922                         rte_errno = errno;
11923                         rte_spinlock_lock(&mng->free_sl);
11924                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11925                         rte_spinlock_unlock(&mng->free_sl);
11926                         rte_flow_error_set(error, rte_errno,
11927                                            RTE_FLOW_ERROR_TYPE_ACTION,
11928                                            NULL, "failed to create ASO "
11929                                            "flow hit action");
11930                         return 0; /* 0 is an error. */
11931                 }
11932         }
11933         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
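              /* Compose the index: action offset + 1 in the upper 16 bits (so
               * that 0 can denote failure), pool index in the lower 16 bits.
               */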
11934         return pool->index | ((age_free->offset + 1) << 16);
11935 }
11936
11937 /**
11938  * Initialize flow ASO age parameters.
11939  *
11940  * @param[in] dev
11941  *   Pointer to rte_eth_dev structure.
11942  * @param[in] age_idx
11943  *   Index of ASO age action.
11944  * @param[in] context
11945  *   Pointer to flow counter age context.
11946  * @param[in] timeout
11947  *   Aging timeout in seconds.
11948  */
11950 static void
11951 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11952                             uint32_t age_idx,
11953                             void *context,
11954                             uint32_t timeout)
11955 {
11956         struct mlx5_aso_age_action *aso_age;
11957
11958         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11959         MLX5_ASSERT(aso_age);
11960         aso_age->age_params.context = context;
11961         aso_age->age_params.timeout = timeout;
11962         aso_age->age_params.port_id = dev->data->port_id;
11963         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11964                          __ATOMIC_RELAXED);
11965         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11966                          __ATOMIC_RELAXED);
11967 }
11968
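      /*
       * Map the l4_ok integrity bits onto the HW match fields; in short,
       * as implemented below:
       *   mask.l4_ok set, value.l4_ok == 1 -> match l4_ok == 1 and
       *                                       l4_checksum_ok == 1
       *   mask.l4_ok set, value.l4_ok == 0 -> match l4_checksum_ok == 0 only
       *   only mask.l4_csum_ok set         -> match l4_checksum_ok == value
       */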
11969 static void
11970 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11971                                const struct rte_flow_item_integrity *value,
11972                                void *headers_m, void *headers_v)
11973 {
11974         if (mask->l4_ok) {
11975                 /* The application l4_ok filter aggregates all hardware l4 filters,
11976                  * therefore hw l4_checksum_ok must be implicitly added here.
11977                  */
11978                 struct rte_flow_item_integrity local_item;
11979
11980                 local_item.l4_csum_ok = 1;
11981                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11982                          local_item.l4_csum_ok);
11983                 if (value->l4_ok) {
11984                         /* An application l4_ok = 1 match sets both hw flags,
11985                          * l4_ok and l4_checksum_ok, to 1.
11986                          */
11987                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11988                                  l4_checksum_ok, local_item.l4_csum_ok);
11989                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11990                                  mask->l4_ok);
11991                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11992                                  value->l4_ok);
11993                 } else {
11994                         /* An application l4_ok = 0 match is translated to
11995                          * the hw flag l4_checksum_ok = 0 only.
11996                          */
11997                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11998                                  l4_checksum_ok, 0);
11999                 }
12000         } else if (mask->l4_csum_ok) {
12001                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
12002                          mask->l4_csum_ok);
12003                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12004                          value->l4_csum_ok);
12005         }
12006 }
12007
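      /*
       * Same scheme for the l3_ok bits; ipv4_checksum_ok participates only
       * when the matched L3 header is IPv4, as implemented below:
       *   mask.l3_ok set, value.l3_ok == 1 -> match l3_ok == 1, plus
       *                                       ipv4_checksum_ok == 1 for IPv4
       *   mask.l3_ok set, value.l3_ok == 0 -> match ipv4_checksum_ok == 0 only
       *   only mask.ipv4_csum_ok set       -> match ipv4_checksum_ok == value
       */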
12008 static void
12009 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12010                                const struct rte_flow_item_integrity *value,
12011                                void *headers_m, void *headers_v,
12012                                bool is_ipv4)
12013 {
12014         if (mask->l3_ok) {
12015                 /* The application l3_ok filter aggregates all hardware l3 filters,
12016                  * therefore hw ipv4_checksum_ok must be implicitly added here.
12017                  */
12018                 struct rte_flow_item_integrity local_item;
12019
12020                 local_item.ipv4_csum_ok = !!is_ipv4;
12021                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12022                          local_item.ipv4_csum_ok);
12023                 if (value->l3_ok) {
12024                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12025                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
12026                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
12027                                  mask->l3_ok);
12028                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12029                                  value->l3_ok);
12030                 } else {
12031                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12032                                  ipv4_checksum_ok, 0);
12033                 }
12034         } else if (mask->ipv4_csum_ok) {
12035                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12036                          mask->ipv4_csum_ok);
12037                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12038                          value->ipv4_csum_ok);
12039         }
12040 }
12041
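      /*
       * The integrity item level selects the headers to match: level > 1
       * matches the inner headers, with the tunnel item found in the pattern
       * marking the boundary; otherwise the outer headers are matched.
       */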
12042 static void
12043 flow_dv_translate_item_integrity(void *matcher, void *key,
12044                                  const struct rte_flow_item *head_item,
12045                                  const struct rte_flow_item *integrity_item)
12046 {
12047         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12048         const struct rte_flow_item_integrity *value = integrity_item->spec;
12049         const struct rte_flow_item *tunnel_item, *end_item, *item;
12050         void *headers_m;
12051         void *headers_v;
12052         uint32_t l3_protocol;
12053
12054         if (!value)
12055                 return;
12056         if (!mask)
12057                 mask = &rte_flow_item_integrity_mask;
12058         if (value->level > 1) {
12059                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12060                                          inner_headers);
12061                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12062         } else {
12063                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12064                                          outer_headers);
12065                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12066         }
12067         tunnel_item = mlx5_flow_find_tunnel_item(head_item);
12068         if (value->level > 1) {
12069                 /* The tunnel item was verified during item validation. */
12070                 item = tunnel_item;
12071                 end_item = mlx5_find_end_item(tunnel_item);
12072         } else {
12073                 item = head_item;
12074                 end_item = tunnel_item ? tunnel_item :
12075                            mlx5_find_end_item(integrity_item);
12076         }
12077         l3_protocol = mask->l3_ok ?
12078                       mlx5_flow_locate_proto_l3(&item, end_item) : 0;
12079         flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
12080                                        l3_protocol == RTE_ETHER_TYPE_IPV4);
12081         flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
12082 }
12083
12084 /**
12085  * Prepare a DV flow counter with aging configuration.
12086  * Get it by index when it exists, create a new one when it doesn't.
12087  *
12088  * @param[in] dev
12089  *   Pointer to rte_eth_dev structure.
12090  * @param[in] dev_flow
12091  *   Pointer to the sub flow.
12092  * @param[in, out] flow
12093  *   Pointer to the flow structure.
12094  * @param[in] count
12095  *   Pointer to the counter action configuration.
12096  * @param[in] age
12097  *   Pointer to the aging action configuration.
12098  * @param[out] error
12099  *   Pointer to the error structure.
12100  *
12101  * @return
12102  *   Pointer to the counter on success, NULL otherwise.
12103  */
12104 static struct mlx5_flow_counter *
12105 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12106                         struct mlx5_flow *dev_flow,
12107                         struct rte_flow *flow,
12108                         const struct rte_flow_action_count *count,
12109                         const struct rte_flow_action_age *age,
12110                         struct rte_flow_error *error)
12111 {
12112         if (!flow->counter) {
12113                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12114                                                                  count, age);
12115                 if (!flow->counter) {
12116                         rte_flow_error_set(error, rte_errno,
12117                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12118                                            "cannot create counter object.");
12119                         return NULL;
12120                 }
12121         }
12122         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12123 }
12124
12125 /*
12126  * Release an ASO CT action via its owning device.
12127  *
12128  * @param[in] dev
12129  *   Pointer to the Ethernet device structure.
12130  * @param[in] idx
12131  *   Index of ASO CT action to release.
12132  *
12133  * @return
12134  *   0 when the CT action was released, otherwise the remaining number of references.
12135  */
12136 static inline int
12137 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12138 {
12139         struct mlx5_priv *priv = dev->data->dev_private;
12140         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12141         uint32_t ret;
12142         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12143         enum mlx5_aso_ct_state state =
12144                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12145
12146         /* Cannot release when CT is in the ASO SQ. */
12147         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12148                 return -1;
12149         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12150         if (!ret) {
12151                 if (ct->dr_action_orig) {
12152 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12153                         claim_zero(mlx5_glue->destroy_flow_action
12154                                         (ct->dr_action_orig));
12155 #endif
12156                         ct->dr_action_orig = NULL;
12157                 }
12158                 if (ct->dr_action_rply) {
12159 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12160                         claim_zero(mlx5_glue->destroy_flow_action
12161                                         (ct->dr_action_rply));
12162 #endif
12163                         ct->dr_action_rply = NULL;
12164                 }
12165                 /* Clear the state to FREE; not needed on the first allocation. */
12166                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12167                 rte_spinlock_lock(&mng->ct_sl);
12168                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12169                 rte_spinlock_unlock(&mng->ct_sl);
12170         }
12171         return (int)ret;
12172 }
12173
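      /*
       * The indirect CT action index embeds the owner port ID, so the
       * release request is forwarded to the device that created the action.
       */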
12174 static inline int
12175 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
12176 {
12177         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12178         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12179         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12181
12182         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12183         if (dev->data->dev_started != 1)
12184                 return -1;
12185         return flow_dv_aso_ct_dev_release(owndev, idx);
12186 }
12187
12188 /*
12189  * Resize the ASO CT pools array by 64 pools.
12190  *
12191  * @param[in] dev
12192  *   Pointer to the Ethernet device structure.
12193  *
12194  * @return
12195  *   0 on success, otherwise negative errno value and rte_errno is set.
12196  */
12197 static int
12198 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12199 {
12200         struct mlx5_priv *priv = dev->data->dev_private;
12201         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12202         void *old_pools = mng->pools;
12203         /* Magic number for now; needs a macro. */
12204         uint32_t resize = mng->n + 64;
12205         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12206         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12207
12208         if (!pools) {
12209                 rte_errno = ENOMEM;
12210                 return -rte_errno;
12211         }
12212         rte_rwlock_write_lock(&mng->resize_rwl);
12213         /* The ASO SQ/QP was already initialized at startup. */
12214         if (old_pools) {
12215                 /* Realloc could be an alternative choice. */
12216                 rte_memcpy(pools, old_pools,
12217                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12218                 mlx5_free(old_pools);
12219         }
12220         mng->n = resize;
12221         mng->pools = pools;
12222         rte_rwlock_write_unlock(&mng->resize_rwl);
12223         return 0;
12224 }
12225
12226 /*
12227  * Create and initialize a new ASO CT pool.
12228  *
12229  * @param[in] dev
12230  *   Pointer to the Ethernet device structure.
12231  * @param[out] ct_free
12232  *   Where to put the pointer to a new CT action.
12233  *
12234  * @return
12235  *   The CT actions pool pointer and @p ct_free is set on success,
12236  *   NULL otherwise and rte_errno is set.
12237  */
12238 static struct mlx5_aso_ct_pool *
12239 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12240                        struct mlx5_aso_ct_action **ct_free)
12241 {
12242         struct mlx5_priv *priv = dev->data->dev_private;
12243         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12244         struct mlx5_aso_ct_pool *pool = NULL;
12245         struct mlx5_devx_obj *obj = NULL;
12246         uint32_t i;
12247         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12248
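              /* A single DevX object backs a whole pool of CT actions. */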
12249         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
12250                                                 priv->sh->pdn, log_obj_size);
12251         if (!obj) {
12252                 rte_errno = ENODATA;
12253                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12254                 return NULL;
12255         }
12256         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12257         if (!pool) {
12258                 rte_errno = ENOMEM;
12259                 claim_zero(mlx5_devx_cmd_destroy(obj));
12260                 return NULL;
12261         }
12262         pool->devx_obj = obj;
12263         pool->index = mng->next;
12264         /* Resize pools array if there is no room for the new pool in it. */
12265         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12266                 claim_zero(mlx5_devx_cmd_destroy(obj));
12267                 mlx5_free(pool);
12268                 return NULL;
12269         }
12270         mng->pools[pool->index] = pool;
12271         mng->next++;
12272         /* Assign the first action in the new pool, the rest go to the free list. */
12273         *ct_free = &pool->actions[0];
12274         /* The lock is taken outside, so the list operation is safe here. */
12275         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12276                 /* refcnt is 0 when allocating the memory. */
12277                 pool->actions[i].offset = i;
12278                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12279         }
12280         return pool;
12281 }
12282
12283 /*
12284  * Allocate an ASO CT action from the free list.
12285  *
12286  * @param[in] dev
12287  *   Pointer to the Ethernet device structure.
12288  * @param[out] error
12289  *   Pointer to the error structure.
12290  *
12291  * @return
12292  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12293  */
12294 static uint32_t
12295 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12296 {
12297         struct mlx5_priv *priv = dev->data->dev_private;
12298         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12299         struct mlx5_aso_ct_action *ct = NULL;
12300         struct mlx5_aso_ct_pool *pool;
12301         int reg_c;
12302         uint32_t ct_idx;
12303
12304         MLX5_ASSERT(mng);
12305         if (!priv->config.devx) {
12306                 rte_errno = ENOTSUP;
12307                 return 0;
12308         }
12309         /* Get a free CT action; if none, a new pool will be created. */
12310         rte_spinlock_lock(&mng->ct_sl);
12311         ct = LIST_FIRST(&mng->free_cts);
12312         if (ct) {
12313                 LIST_REMOVE(ct, next);
12314         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12315                 rte_spinlock_unlock(&mng->ct_sl);
12316                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12317                                    NULL, "failed to create ASO CT pool");
12318                 return 0;
12319         }
12320         rte_spinlock_unlock(&mng->ct_sl);
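              /* Recover the enclosing pool from the action via its offset. */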
12321         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12322         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12323         /* 0: inactive, 1: created, 2+: used by flows. */
12324         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12325         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
              if (reg_c < 0) {
                      /* A valid REG_C is required; error was set by the resolver. */
                      flow_dv_aso_ct_dev_release(dev, ct_idx);
                      return 0;
              }
12326         if (!ct->dr_action_orig) {
12327 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12328                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12329                         (priv->sh->rx_domain, pool->devx_obj->obj,
12330                          ct->offset,
12331                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12332                          reg_c - REG_C_0);
12333 #else
12334                 RTE_SET_USED(reg_c);
12335 #endif
12336                 if (!ct->dr_action_orig) {
12337                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12338                         rte_flow_error_set(error, rte_errno,
12339                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12340                                            "failed to create ASO CT action");
12341                         return 0;
12342                 }
12343         }
12344         if (!ct->dr_action_rply) {
12345 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12346                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12347                         (priv->sh->rx_domain, pool->devx_obj->obj,
12348                          ct->offset,
12349                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12350                          reg_c - REG_C_0);
12351 #endif
12352                 if (!ct->dr_action_rply) {
12353                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12354                         rte_flow_error_set(error, rte_errno,
12355                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12356                                            "failed to create ASO CT action");
12357                         return 0;
12358                 }
12359         }
12360         return ct_idx;
12361 }
12362
12363 /*
12364  * Create a conntrack object with context and actions by using the ASO mechanism.
12365  *
12366  * @param[in] dev
12367  *   Pointer to rte_eth_dev structure.
12368  * @param[in] pro
12369  *   Pointer to conntrack information profile.
12370  * @param[out] error
12371  *   Pointer to the error structure.
12372  *
12373  * @return
12374  *   Index to conntrack object on success, 0 otherwise.
12375  */
12376 static uint32_t
12377 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12378                                    const struct rte_flow_action_conntrack *pro,
12379                                    struct rte_flow_error *error)
12380 {
12381         struct mlx5_priv *priv = dev->data->dev_private;
12382         struct mlx5_dev_ctx_shared *sh = priv->sh;
12383         struct mlx5_aso_ct_action *ct;
12384         uint32_t idx;
12385
12386         if (!sh->ct_aso_en)
12387                 return rte_flow_error_set(error, ENOTSUP,
12388                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12389                                           "Connection tracking is not supported");
12390         idx = flow_dv_aso_ct_alloc(dev, error);
12391         if (!idx)
12392                 return rte_flow_error_set(error, rte_errno,
12393                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12394                                           "Failed to allocate CT object");
12395         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12396         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro)) {
                      /* Release the allocated CT object on WQE update failure. */
                      flow_dv_aso_ct_dev_release(dev, idx);
12397                 return rte_flow_error_set(error, EBUSY,
12398                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12399                                           "Failed to update CT");
              }
12400         ct->is_original = !!pro->is_original_dir;
12401         ct->peer = pro->peer_port;
12402         return idx;
12403 }
12404
12405 /**
12406  * Fill the flow with DV spec, lock free
12407  * (mutex should be acquired by caller).
12408  *
12409  * @param[in] dev
12410  *   Pointer to rte_eth_dev structure.
12411  * @param[in, out] dev_flow
12412  *   Pointer to the sub flow.
12413  * @param[in] attr
12414  *   Pointer to the flow attributes.
12415  * @param[in] items
12416  *   Pointer to the list of items.
12417  * @param[in] actions
12418  *   Pointer to the list of actions.
12419  * @param[out] error
12420  *   Pointer to the error structure.
12421  *
12422  * @return
12423  *   0 on success, a negative errno value otherwise and rte_errno is set.
12424  */
12425 static int
12426 flow_dv_translate(struct rte_eth_dev *dev,
12427                   struct mlx5_flow *dev_flow,
12428                   const struct rte_flow_attr *attr,
12429                   const struct rte_flow_item items[],
12430                   const struct rte_flow_action actions[],
12431                   struct rte_flow_error *error)
12432 {
12433         struct mlx5_priv *priv = dev->data->dev_private;
12434         struct mlx5_dev_config *dev_conf = &priv->config;
12435         struct rte_flow *flow = dev_flow->flow;
12436         struct mlx5_flow_handle *handle = dev_flow->handle;
12437         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12438         struct mlx5_flow_rss_desc *rss_desc;
12439         uint64_t item_flags = 0;
12440         uint64_t last_item = 0;
12441         uint64_t action_flags = 0;
12442         struct mlx5_flow_dv_matcher matcher = {
12443                 .mask = {
12444                         .size = sizeof(matcher.mask.buf),
12445                 },
12446         };
12447         int actions_n = 0;
12448         bool actions_end = false;
12449         union {
12450                 struct mlx5_flow_dv_modify_hdr_resource res;
12451                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12452                             sizeof(struct mlx5_modification_cmd) *
12453                             (MLX5_MAX_MODIFY_NUM + 1)];
12454         } mhdr_dummy;
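              /*
               * The union reserves stack space for the modify header resource
               * followed by up to MLX5_MAX_MODIFY_NUM + 1 modification
               * commands, which are built in place during translation.
               */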
12455         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12456         const struct rte_flow_action_count *count = NULL;
12457         const struct rte_flow_action_age *non_shared_age = NULL;
12458         union flow_dv_attr flow_attr = { .attr = 0 };
12459         uint32_t tag_be;
12460         union mlx5_flow_tbl_key tbl_key;
12461         uint32_t modify_action_position = UINT32_MAX;
12462         void *match_mask = matcher.mask.buf;
12463         void *match_value = dev_flow->dv.value.buf;
12464         uint8_t next_protocol = 0xff;
12465         struct rte_vlan_hdr vlan = { 0 };
12466         struct mlx5_flow_dv_dest_array_resource mdest_res;
12467         struct mlx5_flow_dv_sample_resource sample_res;
12468         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12469         const struct rte_flow_action_sample *sample = NULL;
12470         struct mlx5_flow_sub_actions_list *sample_act;
12471         uint32_t sample_act_pos = UINT32_MAX;
12472         uint32_t age_act_pos = UINT32_MAX;
12473         uint32_t num_of_dest = 0;
12474         int tmp_actions_n = 0;
12475         uint32_t table;
12476         int ret = 0;
12477         const struct mlx5_flow_tunnel *tunnel = NULL;
12478         struct flow_grp_info grp_info = {
12479                 .external = !!dev_flow->external,
12480                 .transfer = !!attr->transfer,
12481                 .fdb_def_rule = !!priv->fdb_def_rule,
12482                 .skip_scale = dev_flow->skip_scale &
12483                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12484                 .std_tbl_fix = true,
12485         };
12486         const struct rte_flow_item *head_item = items;
12487
12488         if (!wks)
12489                 return rte_flow_error_set(error, ENOMEM,
12490                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12491                                           NULL,
12492                                           "failed to push flow workspace");
12493         rss_desc = &wks->rss_desc;
12494         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12495         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12496         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12497                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12498         /* Update the normal path action resource into the array's last index. */
12499         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12500         if (is_tunnel_offload_active(dev)) {
12501                 if (dev_flow->tunnel) {
12502                         RTE_VERIFY(dev_flow->tof_type ==
12503                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12504                         tunnel = dev_flow->tunnel;
12505                 } else {
12506                         tunnel = mlx5_get_tof(items, actions,
12507                                               &dev_flow->tof_type);
12508                         dev_flow->tunnel = tunnel;
12509                 }
12510                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12511                                         (dev, attr, tunnel, dev_flow->tof_type);
12512         }
12515         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12516                                        &grp_info, error);
12517         if (ret)
12518                 return ret;
12519         dev_flow->dv.group = table;
12520         if (attr->transfer)
12521                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12522         /* The number of actions must be reset to 0 in case of a dirty stack. */
12523         mhdr_res->actions_num = 0;
12524         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12525                 /*
12526                  * Do not add a decap action if the match rule drops packets:
12527                  * HW rejects rules combining decap & drop.
12528                  *
12529                  * If the tunnel match rule was inserted before the matching
12530                  * tunnel set rule, the flow table used in the match rule must
12531                  * be registered. The current implementation handles that in
12532                  * flow_dv_match_register() at the end of the function.
12533                  */
12534                 bool add_decap = true;
12535                 const struct rte_flow_action *ptr = actions;
12536
12537                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12538                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12539                                 add_decap = false;
12540                                 break;
12541                         }
12542                 }
12543                 if (add_decap) {
12544                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12545                                                            attr->transfer,
12546                                                            error))
12547                                 return -rte_errno;
12548                         dev_flow->dv.actions[actions_n++] =
12549                                         dev_flow->dv.encap_decap->action;
12550                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12551                 }
12552         }
12553         for (; !actions_end ; actions++) {
12554                 const struct rte_flow_action_queue *queue;
12555                 const struct rte_flow_action_rss *rss;
12556                 const struct rte_flow_action *action = actions;
12557                 const uint8_t *rss_key;
12558                 struct mlx5_flow_tbl_resource *tbl;
12559                 struct mlx5_aso_age_action *age_act;
12560                 struct mlx5_flow_counter *cnt_act;
12561                 uint32_t port_id = 0;
12562                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12563                 int action_type = actions->type;
12564                 const struct rte_flow_action *found_action = NULL;
12565                 uint32_t jump_group = 0;
12566                 uint32_t owner_idx;
12567                 struct mlx5_aso_ct_action *ct;
12568
12569                 if (!mlx5_flow_os_action_supported(action_type))
12570                         return rte_flow_error_set(error, ENOTSUP,
12571                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12572                                                   actions,
12573                                                   "action not supported");
12574                 switch (action_type) {
12575                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12576                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12577                         break;
12578                 case RTE_FLOW_ACTION_TYPE_VOID:
12579                         break;
12580                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12581                         if (flow_dv_translate_action_port_id(dev, action,
12582                                                              &port_id, error))
12583                                 return -rte_errno;
12584                         port_id_resource.port_id = port_id;
12585                         MLX5_ASSERT(!handle->rix_port_id_action);
12586                         if (flow_dv_port_id_action_resource_register
12587                             (dev, &port_id_resource, dev_flow, error))
12588                                 return -rte_errno;
12589                         dev_flow->dv.actions[actions_n++] =
12590                                         dev_flow->dv.port_id_action->action;
12591                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12592                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12593                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12594                         num_of_dest++;
12595                         break;
12596                 case RTE_FLOW_ACTION_TYPE_FLAG:
12597                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12598                         dev_flow->handle->mark = 1;
12599                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12600                                 struct rte_flow_action_mark mark = {
12601                                         .id = MLX5_FLOW_MARK_DEFAULT,
12602                                 };
12603
12604                                 if (flow_dv_convert_action_mark(dev, &mark,
12605                                                                 mhdr_res,
12606                                                                 error))
12607                                         return -rte_errno;
12608                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12609                                 break;
12610                         }
12611                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12612                         /*
12613                          * Only one FLAG or MARK is supported per device flow
12614                          * right now. So the pointer to the tag resource must be
12615                          * zero before the register process.
12616                          */
12617                         MLX5_ASSERT(!handle->dvh.rix_tag);
12618                         if (flow_dv_tag_resource_register(dev, tag_be,
12619                                                           dev_flow, error))
12620                                 return -rte_errno;
12621                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12622                         dev_flow->dv.actions[actions_n++] =
12623                                         dev_flow->dv.tag_resource->action;
12624                         break;
12625                 case RTE_FLOW_ACTION_TYPE_MARK:
12626                         action_flags |= MLX5_FLOW_ACTION_MARK;
12627                         dev_flow->handle->mark = 1;
12628                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12629                                 const struct rte_flow_action_mark *mark =
12630                                         (const struct rte_flow_action_mark *)
12631                                                 actions->conf;
12632
12633                                 if (flow_dv_convert_action_mark(dev, mark,
12634                                                                 mhdr_res,
12635                                                                 error))
12636                                         return -rte_errno;
12637                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12638                                 break;
12639                         }
12640                         /* Fall-through */
12641                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12642                         /* Legacy (non-extensive) MARK action. */
12643                         tag_be = mlx5_flow_mark_set
12644                               (((const struct rte_flow_action_mark *)
12645                                (actions->conf))->id);
12646                         MLX5_ASSERT(!handle->dvh.rix_tag);
12647                         if (flow_dv_tag_resource_register(dev, tag_be,
12648                                                           dev_flow, error))
12649                                 return -rte_errno;
12650                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12651                         dev_flow->dv.actions[actions_n++] =
12652                                         dev_flow->dv.tag_resource->action;
12653                         break;
12654                 case RTE_FLOW_ACTION_TYPE_SET_META:
12655                         if (flow_dv_convert_action_set_meta
12656                                 (dev, mhdr_res, attr,
12657                                  (const struct rte_flow_action_set_meta *)
12658                                   actions->conf, error))
12659                                 return -rte_errno;
12660                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12661                         break;
12662                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12663                         if (flow_dv_convert_action_set_tag
12664                                 (dev, mhdr_res,
12665                                  (const struct rte_flow_action_set_tag *)
12666                                   actions->conf, error))
12667                                 return -rte_errno;
12668                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12669                         break;
12670                 case RTE_FLOW_ACTION_TYPE_DROP:
12671                         action_flags |= MLX5_FLOW_ACTION_DROP;
12672                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12673                         break;
12674                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12675                         queue = actions->conf;
12676                         rss_desc->queue_num = 1;
12677                         rss_desc->queue[0] = queue->index;
12678                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12679                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12680                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12681                         num_of_dest++;
12682                         break;
12683                 case RTE_FLOW_ACTION_TYPE_RSS:
12684                         rss = actions->conf;
12685                         memcpy(rss_desc->queue, rss->queue,
12686                                rss->queue_num * sizeof(uint16_t));
12687                         rss_desc->queue_num = rss->queue_num;
12688                         /* NULL RSS key indicates default RSS key. */
12689                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12690                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12691                         /*
12692                          * rss->level and rss->types should be set in advance
12693                          * when expanding items for RSS.
12694                          */
12695                         action_flags |= MLX5_FLOW_ACTION_RSS;
12696                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12697                                 MLX5_FLOW_FATE_SHARED_RSS :
12698                                 MLX5_FLOW_FATE_QUEUE;
12699                         break;
12700                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
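                              /* The internal AGE action carries the ASO age
                               * action index in its conf pointer.
                               */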
12701                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12702                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12703                         __atomic_fetch_add(&age_act->refcnt, 1,
12704                                            __ATOMIC_RELAXED);
12705                         age_act_pos = actions_n++;
12706                         action_flags |= MLX5_FLOW_ACTION_AGE;
12707                         break;
12708                 case RTE_FLOW_ACTION_TYPE_AGE:
12709                         non_shared_age = action->conf;
12710                         age_act_pos = actions_n++;
12711                         action_flags |= MLX5_FLOW_ACTION_AGE;
12712                         break;
12713                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12714                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12715                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12716                                                              NULL);
12717                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12718                                            __ATOMIC_RELAXED);
12719                         /* Save information first, will apply later. */
12720                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12721                         break;
12722                 case RTE_FLOW_ACTION_TYPE_COUNT:
12723                         if (!dev_conf->devx) {
12724                                 return rte_flow_error_set
12725                                               (error, ENOTSUP,
12726                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12727                                                NULL,
12728                                                "count action not supported");
12729                         }
12730                         /* Save information first, will apply later. */
12731                         count = action->conf;
12732                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12733                         break;
12734                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12735                         dev_flow->dv.actions[actions_n++] =
12736                                                 priv->sh->pop_vlan_action;
12737                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12738                         break;
12739                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12740                         if (!(action_flags &
12741                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12742                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12743                         vlan.eth_proto = rte_be_to_cpu_16
12744                              ((((const struct rte_flow_action_of_push_vlan *)
12745                                                    actions->conf)->ethertype));
12746                         found_action = mlx5_flow_find_action
12747                                         (actions + 1,
12748                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12749                         if (found_action)
12750                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12751                         found_action = mlx5_flow_find_action
12752                                         (actions + 1,
12753                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12754                         if (found_action)
12755                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12756                         if (flow_dv_create_action_push_vlan
12757                                             (dev, attr, &vlan, dev_flow, error))
12758                                 return -rte_errno;
12759                         dev_flow->dv.actions[actions_n++] =
12760                                         dev_flow->dv.push_vlan_res->action;
12761                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12762                         break;
12763                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12764                         /* The OF_PUSH_VLAN action already handled this action. */
12765                         MLX5_ASSERT(action_flags &
12766                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12767                         break;
12768                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12769                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12770                                 break;
12771                         flow_dev_get_vlan_info_from_items(items, &vlan);
12772                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12773                         /* With no VLAN push, this is a modify header action. */
12774                         if (flow_dv_convert_action_modify_vlan_vid
12775                                                 (mhdr_res, actions, error))
12776                                 return -rte_errno;
12777                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12778                         break;
12779                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12780                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12781                         if (flow_dv_create_action_l2_encap(dev, actions,
12782                                                            dev_flow,
12783                                                            attr->transfer,
12784                                                            error))
12785                                 return -rte_errno;
12786                         dev_flow->dv.actions[actions_n++] =
12787                                         dev_flow->dv.encap_decap->action;
12788                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12789                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12790                                 sample_act->action_flags |=
12791                                                         MLX5_FLOW_ACTION_ENCAP;
12792                         break;
12793                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12794                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12795                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12796                                                            attr->transfer,
12797                                                            error))
12798                                 return -rte_errno;
12799                         dev_flow->dv.actions[actions_n++] =
12800                                         dev_flow->dv.encap_decap->action;
12801                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12802                         break;
12803                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12804                         /* Handle encap with preceding decap. */
12805                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12806                                 if (flow_dv_create_action_raw_encap
12807                                         (dev, actions, dev_flow, attr, error))
12808                                         return -rte_errno;
12809                                 dev_flow->dv.actions[actions_n++] =
12810                                         dev_flow->dv.encap_decap->action;
12811                         } else {
12812                                 /* Handle encap without preceding decap. */
12813                                 if (flow_dv_create_action_l2_encap
12814                                     (dev, actions, dev_flow, attr->transfer,
12815                                      error))
12816                                         return -rte_errno;
12817                                 dev_flow->dv.actions[actions_n++] =
12818                                         dev_flow->dv.encap_decap->action;
12819                         }
12820                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12821                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12822                                 sample_act->action_flags |=
12823                                                         MLX5_FLOW_ACTION_ENCAP;
12824                         break;
12825                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
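                              /* Peek past VOID actions: a decap immediately
                               * followed by a raw encap is handled at the
                               * encap action as a single raw reformat.
                               */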
12826                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12827                                 ;
12828                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12829                                 if (flow_dv_create_action_l2_decap
12830                                     (dev, dev_flow, attr->transfer, error))
12831                                         return -rte_errno;
12832                                 dev_flow->dv.actions[actions_n++] =
12833                                         dev_flow->dv.encap_decap->action;
12834                         }
12835                         /* If decap is followed by encap, handle it at encap. */
12836                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12837                         break;
12838                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12839                         dev_flow->dv.actions[actions_n++] =
12840                                 (void *)(uintptr_t)action->conf;
12841                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12842                         break;
12843                 case RTE_FLOW_ACTION_TYPE_JUMP:
12844                         jump_group = ((const struct rte_flow_action_jump *)
12845                                                         action->conf)->group;
12846                         grp_info.std_tbl_fix = 0;
12847                         if (dev_flow->skip_scale &
12848                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12849                                 grp_info.skip_scale = 1;
12850                         else
12851                                 grp_info.skip_scale = 0;
12852                         ret = mlx5_flow_group_to_table(dev, tunnel,
12853                                                        jump_group,
12854                                                        &table,
12855                                                        &grp_info, error);
12856                         if (ret)
12857                                 return ret;
12858                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12859                                                        attr->transfer,
12860                                                        !!dev_flow->external,
12861                                                        tunnel, jump_group, 0,
12862                                                        0, error);
12863                         if (!tbl)
12864                                 return rte_flow_error_set
12865                                                 (error, errno,
12866                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12867                                                  NULL,
12868                                                  "cannot create jump action.");
12869                         if (flow_dv_jump_tbl_resource_register
12870                             (dev, tbl, dev_flow, error)) {
12871                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12872                                 return rte_flow_error_set
12873                                                 (error, errno,
12874                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12875                                                  NULL,
12876                                                  "cannot create jump action.");
12877                         }
12878                         dev_flow->dv.actions[actions_n++] =
12879                                         dev_flow->dv.jump->action;
12880                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12881                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12882                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12883                         num_of_dest++;
12884                         break;
12885                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12886                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12887                         if (flow_dv_convert_action_modify_mac
12888                                         (mhdr_res, actions, error))
12889                                 return -rte_errno;
12890                         action_flags |= actions->type ==
12891                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12892                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12893                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12894                         break;
12895                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12896                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12897                         if (flow_dv_convert_action_modify_ipv4
12898                                         (mhdr_res, actions, error))
12899                                 return -rte_errno;
12900                         action_flags |= actions->type ==
12901                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12902                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12903                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12904                         break;
12905                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12906                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12907                         if (flow_dv_convert_action_modify_ipv6
12908                                         (mhdr_res, actions, error))
12909                                 return -rte_errno;
12910                         action_flags |= actions->type ==
12911                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12912                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12913                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12914                         break;
12915                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12916                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12917                         if (flow_dv_convert_action_modify_tp
12918                                         (mhdr_res, actions, items,
12919                                          &flow_attr, dev_flow, !!(action_flags &
12920                                          MLX5_FLOW_ACTION_DECAP), error))
12921                                 return -rte_errno;
12922                         action_flags |= actions->type ==
12923                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12924                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12925                                         MLX5_FLOW_ACTION_SET_TP_DST;
12926                         break;
12927                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12928                         if (flow_dv_convert_action_modify_dec_ttl
12929                                         (mhdr_res, items, &flow_attr, dev_flow,
12930                                          !!(action_flags &
12931                                          MLX5_FLOW_ACTION_DECAP), error))
12932                                 return -rte_errno;
12933                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12934                         break;
12935                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12936                         if (flow_dv_convert_action_modify_ttl
12937                                         (mhdr_res, actions, items, &flow_attr,
12938                                          dev_flow, !!(action_flags &
12939                                          MLX5_FLOW_ACTION_DECAP), error))
12940                                 return -rte_errno;
12941                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12942                         break;
12943                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12944                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12945                         if (flow_dv_convert_action_modify_tcp_seq
12946                                         (mhdr_res, actions, error))
12947                                 return -rte_errno;
12948                         action_flags |= actions->type ==
12949                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12950                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12951                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12952                         break;
12954                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12955                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12956                         if (flow_dv_convert_action_modify_tcp_ack
12957                                         (mhdr_res, actions, error))
12958                                 return -rte_errno;
12959                         action_flags |= actions->type ==
12960                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
12961                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
12962                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
12963                         break;
12964                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
12965                         if (flow_dv_convert_action_set_reg
12966                                         (mhdr_res, actions, error))
12967                                 return -rte_errno;
12968                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12969                         break;
12970                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
12971                         if (flow_dv_convert_action_copy_mreg
12972                                         (dev, mhdr_res, actions, error))
12973                                 return -rte_errno;
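                        /*
                         * The register copy is carried by the same
                         * modify-header resource, hence accounted as SET_TAG.
                         */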
12974                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12975                         break;
12976                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
12977                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
12978                         dev_flow->handle->fate_action =
12979                                         MLX5_FLOW_FATE_DEFAULT_MISS;
12980                         break;
12981                 case RTE_FLOW_ACTION_TYPE_METER:
12982                         if (!wks->fm)
12983                                 return rte_flow_error_set(error, rte_errno,
12984                                         RTE_FLOW_ERROR_TYPE_ACTION,
12985                                         NULL, "Failed to get meter in flow.");
12986                         /* Set the meter action. */
12987                         dev_flow->dv.actions[actions_n++] =
12988                                 wks->fm->meter_action;
12989                         action_flags |= MLX5_FLOW_ACTION_METER;
12990                         break;
12991                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
12992                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
12993                                                               actions, error))
12994                                 return -rte_errno;
12995                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
12996                         break;
12997                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
12998                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
12999                                                               actions, error))
13000                                 return -rte_errno;
13001                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13002                         break;
13003                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13004                         sample_act_pos = actions_n;
13005                         sample = (const struct rte_flow_action_sample *)
13006                                  action->conf;
13007                         actions_n++;
13008                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13009                         /* Put the encap action into the group if used with port ID. */
13010                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13011                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13012                                 sample_act->action_flags |=
13013                                                         MLX5_FLOW_ACTION_ENCAP;
13014                         break;
13015                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13016                         if (flow_dv_convert_action_modify_field
13017                                         (dev, mhdr_res, actions, attr, error))
13018                                 return -rte_errno;
13019                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13020                         break;
13021                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13022                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13023                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13024                         if (!ct)
13025                                 return rte_flow_error_set(error, EINVAL,
13026                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13027                                                 NULL,
13028                                                 "Failed to get CT object.");
13029                         if (mlx5_aso_ct_available(priv->sh, ct))
13030                                 return rte_flow_error_set(error, rte_errno,
13031                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13032                                                 NULL,
13033                                                 "CT is unavailable.");
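                        /*
                         * Use the DR action matching the direction
                         * (original or reply) tracked by this flow.
                         */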
13034                         if (ct->is_original)
13035                                 dev_flow->dv.actions[actions_n] =
13036                                                         ct->dr_action_orig;
13037                         else
13038                                 dev_flow->dv.actions[actions_n] =
13039                                                         ct->dr_action_rply;
13040                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
13041                         flow->ct = owner_idx;
13042                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
13043                         actions_n++;
13044                         action_flags |= MLX5_FLOW_ACTION_CT;
13045                         break;
13046                 case RTE_FLOW_ACTION_TYPE_END:
13047                         actions_end = true;
13048                         if (mhdr_res->actions_num) {
13049                                 /* Create the modify-header action if needed. */
13050                                 if (flow_dv_modify_hdr_resource_register
13051                                         (dev, mhdr_res, dev_flow, error))
13052                                         return -rte_errno;
13053                                 dev_flow->dv.actions[modify_action_position] =
13054                                         handle->dvh.modify_hdr->action;
13055                         }
13056                         /*
13057                          * Handle AGE and COUNT action by single HW counter
13058                          * when they are not shared.
13059                          */
13060                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
13061                                 if ((non_shared_age &&
13062                                      count && !count->shared) ||
13063                                     !(priv->sh->flow_hit_aso_en &&
13064                                       (attr->group || attr->transfer))) {
13065                                         /* Creates age by counters. */
13066                                         cnt_act = flow_dv_prepare_counter
13067                                                                 (dev, dev_flow,
13068                                                                  flow, count,
13069                                                                  non_shared_age,
13070                                                                  error);
13071                                         if (!cnt_act)
13072                                                 return -rte_errno;
13073                                         dev_flow->dv.actions[age_act_pos] =
13074                                                                 cnt_act->action;
13075                                         break;
13076                                 }
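                                /*
                                 * Otherwise age through an ASO flow hit
                                 * object, allocated on first use.
                                 */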
13077                                 if (!flow->age && non_shared_age) {
13078                                         flow->age = flow_dv_aso_age_alloc
13079                                                                 (dev, error);
13080                                         if (!flow->age)
13081                                                 return -rte_errno;
13082                                         flow_dv_aso_age_params_init
13083                                                     (dev, flow->age,
13084                                                      non_shared_age->context ?
13085                                                      non_shared_age->context :
13086                                                      (void *)(uintptr_t)
13087                                                      (dev_flow->flow_idx),
13088                                                      non_shared_age->timeout);
13089                                 }
13090                                 age_act = flow_aso_age_get_by_idx(dev,
13091                                                                   flow->age);
13092                                 dev_flow->dv.actions[age_act_pos] =
13093                                                              age_act->dr_action;
13094                         }
13095                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13096                                 /*
13097                                  * Create one count action, to be used
13098                                  * by all sub-flows.
13099                                  */
13100                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13101                                                                   flow, count,
13102                                                                   NULL, error);
13103                                 if (!cnt_act)
13104                                         return -rte_errno;
13105                                 dev_flow->dv.actions[actions_n++] =
13106                                                                 cnt_act->action;
13107                         }
13108                 default:
13109                         break;
13110                 }
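                /*
                 * All header-modify commands are accumulated in mhdr_res and
                 * emitted as one modify-header action; reserve its slot at the
                 * position of the first such command. The action itself is
                 * registered when RTE_FLOW_ACTION_TYPE_END is reached.
                 */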
13111                 if (mhdr_res->actions_num &&
13112                     modify_action_position == UINT32_MAX)
13113                         modify_action_position = actions_n++;
13114         }
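        /*
         * Translate the pattern items into the matcher mask/value buffers,
         * tracking the matched layers and the IP next protocol, which is
         * needed for tunnel detection on the following items.
         */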
13115         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13116                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13117                 int item_type = items->type;
13118
13119                 if (!mlx5_flow_os_item_supported(item_type))
13120                         return rte_flow_error_set(error, ENOTSUP,
13121                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13122                                                   NULL, "item not supported");
13123                 switch (item_type) {
13124                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13125                         flow_dv_translate_item_port_id
13126                                 (dev, match_mask, match_value, items, attr);
13127                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13128                         break;
13129                 case RTE_FLOW_ITEM_TYPE_ETH:
13130                         flow_dv_translate_item_eth(match_mask, match_value,
13131                                                    items, tunnel,
13132                                                    dev_flow->dv.group);
13133                         matcher.priority = action_flags &
13134                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13135                                         !dev_flow->external ?
13136                                         MLX5_PRIORITY_MAP_L3 :
13137                                         MLX5_PRIORITY_MAP_L2;
13138                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13139                                              MLX5_FLOW_LAYER_OUTER_L2;
13140                         break;
13141                 case RTE_FLOW_ITEM_TYPE_VLAN:
13142                         flow_dv_translate_item_vlan(dev_flow,
13143                                                     match_mask, match_value,
13144                                                     items, tunnel,
13145                                                     dev_flow->dv.group);
13146                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13147                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13148                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13149                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13150                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13151                         break;
13152                 case RTE_FLOW_ITEM_TYPE_IPV4:
13153                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13154                                                   &item_flags, &tunnel);
13155                         flow_dv_translate_item_ipv4(match_mask, match_value,
13156                                                     items, tunnel,
13157                                                     dev_flow->dv.group);
13158                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13159                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13160                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
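                        /* Track the next protocol for later tunnel detection. */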
13161                         if (items->mask != NULL &&
13162                             ((const struct rte_flow_item_ipv4 *)
13163                              items->mask)->hdr.next_proto_id) {
13164                                 next_protocol =
13165                                         ((const struct rte_flow_item_ipv4 *)
13166                                          (items->spec))->hdr.next_proto_id;
13167                                 next_protocol &=
13168                                         ((const struct rte_flow_item_ipv4 *)
13169                                          (items->mask))->hdr.next_proto_id;
13170                         } else {
13171                                 /* Reset for inner layer. */
13172                                 next_protocol = 0xff;
13173                         }
13174                         break;
13175                 case RTE_FLOW_ITEM_TYPE_IPV6:
13176                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13177                                                   &item_flags, &tunnel);
13178                         flow_dv_translate_item_ipv6(match_mask, match_value,
13179                                                     items, tunnel,
13180                                                     dev_flow->dv.group);
13181                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13182                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13183                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13184                         if (items->mask != NULL &&
13185                             ((const struct rte_flow_item_ipv6 *)
13186                              items->mask)->hdr.proto) {
13187                                 next_protocol =
13188                                         ((const struct rte_flow_item_ipv6 *)
13189                                          items->spec)->hdr.proto;
13190                                 next_protocol &=
13191                                         ((const struct rte_flow_item_ipv6 *)
13192                                          items->mask)->hdr.proto;
13193                         } else {
13194                                 /* Reset for inner layer. */
13195                                 next_protocol = 0xff;
13196                         }
13197                         break;
13198                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13199                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13200                                                              match_value,
13201                                                              items, tunnel);
13202                         last_item = tunnel ?
13203                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13204                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13205                         if (items->mask != NULL &&
13206                             ((const struct rte_flow_item_ipv6_frag_ext *)
13207                              items->mask)->hdr.next_header) {
13208                                 next_protocol =
13209                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13210                                  items->spec)->hdr.next_header;
13211                                 next_protocol &=
13212                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13213                                  items->mask)->hdr.next_header;
13214                         } else {
13215                                 /* Reset for inner layer. */
13216                                 next_protocol = 0xff;
13217                         }
13218                         break;
13219                 case RTE_FLOW_ITEM_TYPE_TCP:
13220                         flow_dv_translate_item_tcp(match_mask, match_value,
13221                                                    items, tunnel);
13222                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13223                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13224                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13225                         break;
13226                 case RTE_FLOW_ITEM_TYPE_UDP:
13227                         flow_dv_translate_item_udp(match_mask, match_value,
13228                                                    items, tunnel);
13229                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13230                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13231                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13232                         break;
13233                 case RTE_FLOW_ITEM_TYPE_GRE:
13234                         flow_dv_translate_item_gre(match_mask, match_value,
13235                                                    items, tunnel);
13236                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13237                         last_item = MLX5_FLOW_LAYER_GRE;
13238                         break;
13239                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13240                         flow_dv_translate_item_gre_key(match_mask,
13241                                                        match_value, items);
13242                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13243                         break;
13244                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13245                         flow_dv_translate_item_nvgre(match_mask, match_value,
13246                                                      items, tunnel);
13247                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13248                         last_item = MLX5_FLOW_LAYER_GRE;
13249                         break;
13250                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13251                         flow_dv_translate_item_vxlan(dev, attr,
13252                                                      match_mask, match_value,
13253                                                      items, tunnel);
13254                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13255                         last_item = MLX5_FLOW_LAYER_VXLAN;
13256                         break;
13257                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13258                         flow_dv_translate_item_vxlan_gpe(match_mask,
13259                                                          match_value, items,
13260                                                          tunnel);
13261                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13262                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13263                         break;
13264                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13265                         flow_dv_translate_item_geneve(match_mask, match_value,
13266                                                       items, tunnel);
13267                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13268                         last_item = MLX5_FLOW_LAYER_GENEVE;
13269                         break;
13270                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13271                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13272                                                           match_value,
13273                                                           items, error);
13274                         if (ret)
13275                                 return rte_flow_error_set(error, -ret,
13276                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13277                                         "cannot create GENEVE TLV option");
13278                         flow->geneve_tlv_option = 1;
13279                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13280                         break;
13281                 case RTE_FLOW_ITEM_TYPE_MPLS:
13282                         flow_dv_translate_item_mpls(match_mask, match_value,
13283                                                     items, last_item, tunnel);
13284                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13285                         last_item = MLX5_FLOW_LAYER_MPLS;
13286                         break;
13287                 case RTE_FLOW_ITEM_TYPE_MARK:
13288                         flow_dv_translate_item_mark(dev, match_mask,
13289                                                     match_value, items);
13290                         last_item = MLX5_FLOW_ITEM_MARK;
13291                         break;
13292                 case RTE_FLOW_ITEM_TYPE_META:
13293                         flow_dv_translate_item_meta(dev, match_mask,
13294                                                     match_value, attr, items);
13295                         last_item = MLX5_FLOW_ITEM_METADATA;
13296                         break;
13297                 case RTE_FLOW_ITEM_TYPE_ICMP:
13298                         flow_dv_translate_item_icmp(match_mask, match_value,
13299                                                     items, tunnel);
13300                         last_item = MLX5_FLOW_LAYER_ICMP;
13301                         break;
13302                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13303                         flow_dv_translate_item_icmp6(match_mask, match_value,
13304                                                       items, tunnel);
13305                         last_item = MLX5_FLOW_LAYER_ICMP6;
13306                         break;
13307                 case RTE_FLOW_ITEM_TYPE_TAG:
13308                         flow_dv_translate_item_tag(dev, match_mask,
13309                                                    match_value, items);
13310                         last_item = MLX5_FLOW_ITEM_TAG;
13311                         break;
13312                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13313                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13314                                                         match_value, items);
13315                         last_item = MLX5_FLOW_ITEM_TAG;
13316                         break;
13317                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13318                         flow_dv_translate_item_tx_queue(dev, match_mask,
13319                                                         match_value,
13320                                                         items);
13321                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13322                         break;
13323                 case RTE_FLOW_ITEM_TYPE_GTP:
13324                         flow_dv_translate_item_gtp(match_mask, match_value,
13325                                                    items, tunnel);
13326                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13327                         last_item = MLX5_FLOW_LAYER_GTP;
13328                         break;
13329                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13330                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13331                                                           match_value,
13332                                                           items);
13333                         if (ret)
13334                                 return rte_flow_error_set(error, -ret,
13335                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13336                                         "cannot create GTP PSC item");
13337                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13338                         break;
13339                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13340                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13341                                 /* Create it only the first time it is used. */
13342                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13343                                 if (ret)
13344                                         return rte_flow_error_set
13345                                                 (error, -ret,
13346                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13347                                                 NULL,
13348                                                 "cannot create eCPRI parser");
13349                         }
13350                         flow_dv_translate_item_ecpri(dev, match_mask,
13351                                                      match_value, items);
13352                         /* No other protocol should follow eCPRI layer. */
13353                         last_item = MLX5_FLOW_LAYER_ECPRI;
13354                         break;
13355                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13356                         flow_dv_translate_item_integrity(match_mask,
13357                                                          match_value,
13358                                                          head_item, items);
13359                         break;
13360                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13361                         flow_dv_translate_item_aso_ct(dev, match_mask,
13362                                                       match_value, items);
13363                         break;
13364                 default:
13365                         break;
13366                 }
13367                 item_flags |= last_item;
13368         }
13369         /*
13370          * When E-Switch mode is enabled, we have two cases where we need to
13371          * set the source port manually.
13372          * The first is a NIC steering rule, and the second is an E-Switch
13373          * rule where no port_id item was found. In both cases the source
13374          * port is set according to the current port in use.
13375          */
13376         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13377             (priv->representor || priv->master)) {
13378                 if (flow_dv_translate_item_port_id(dev, match_mask,
13379                                                    match_value, NULL, attr))
13380                         return -rte_errno;
13381         }
13382 #ifdef RTE_LIBRTE_MLX5_DEBUG
13383         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13384                                               dev_flow->dv.value.buf));
13385 #endif
13386         /*
13387          * Layers may be already initialized from prefix flow if this dev_flow
13388          * is the suffix flow.
13389          */
13390         handle->layers |= item_flags;
13391         if (action_flags & MLX5_FLOW_ACTION_RSS)
13392                 flow_dv_hashfields_set(dev_flow, rss_desc);
13393         /* If the sample action contains an RSS action, the sample/mirror
13394          * resource must be registered after the hash fields are updated.
13395          */
13396         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13397                 ret = flow_dv_translate_action_sample(dev,
13398                                                       sample,
13399                                                       dev_flow, attr,
13400                                                       &num_of_dest,
13401                                                       sample_actions,
13402                                                       &sample_res,
13403                                                       error);
13404                 if (ret < 0)
13405                         return ret;
13406                 ret = flow_dv_create_action_sample(dev,
13407                                                    dev_flow,
13408                                                    num_of_dest,
13409                                                    &sample_res,
13410                                                    &mdest_res,
13411                                                    sample_actions,
13412                                                    action_flags,
13413                                                    error);
13414                 if (ret < 0)
13415                         return rte_flow_error_set
13416                                                 (error, rte_errno,
13417                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13418                                                 NULL,
13419                                                 "cannot create sample action");
13420                 if (num_of_dest > 1) {
13421                         dev_flow->dv.actions[sample_act_pos] =
13422                                 dev_flow->dv.dest_array_res->action;
13423                 } else {
13424                         dev_flow->dv.actions[sample_act_pos] =
13425                                 dev_flow->dv.sample_res->verbs_action;
13426                 }
13427         }
13428         /*
13429          * For multiple destinations (sample action with ratio=1), the encap
13430          * action and the port ID action are combined into the destination
13431          * group action, so remove those original actions from the flow and
13432          * use the sample action instead.
13433          */
13434         if (num_of_dest > 1 &&
13435             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13436                 int i;
13437                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13438
13439                 for (i = 0; i < actions_n; i++) {
13440                         if ((sample_act->dr_encap_action &&
13441                                 sample_act->dr_encap_action ==
13442                                 dev_flow->dv.actions[i]) ||
13443                                 (sample_act->dr_port_id_action &&
13444                                 sample_act->dr_port_id_action ==
13445                                 dev_flow->dv.actions[i]) ||
13446                                 (sample_act->dr_jump_action &&
13447                                 sample_act->dr_jump_action ==
13448                                 dev_flow->dv.actions[i]))
13449                                 continue;
13450                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13451                 }
13452                 memcpy((void *)dev_flow->dv.actions,
13453                                 (void *)temp_actions,
13454                                 tmp_actions_n * sizeof(void *));
13455                 actions_n = tmp_actions_n;
13456         }
13457         dev_flow->dv.actions_n = actions_n;
13458         dev_flow->act_flags = action_flags;
13459         if (wks->skip_matcher_reg)
13460                 return 0;
13461         /* Register matcher. */
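        /*
         * The raw checksum of the match mask is used as a fast fingerprint
         * when looking up an already registered matcher.
         */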
13462         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13463                                     matcher.mask.size);
13464         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13465                                         matcher.priority);
13466         /*
13467          * When creating a meter drop flow in the drop table with the
13468          * original 5-tuple match, the matcher priority must be lower
13469          * than that of the mtr_id matcher.
13470          */
13471         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13472             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13473             matcher.priority <= MLX5_REG_BITS)
13474                 matcher.priority += MLX5_REG_BITS;
13475         /* The reserved field does not need to be cleared here. */
13476         tbl_key.is_fdb = attr->transfer;
13477         tbl_key.is_egress = attr->egress;
13478         tbl_key.level = dev_flow->dv.group;
13479         tbl_key.id = dev_flow->dv.table_id;
13480         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13481                                      tunnel, attr->group, error))
13482                 return -rte_errno;
13483         return 0;
13484 }
13485
13486 /**
13487  * Set the hash RX queue by hash fields
13488  * (see enum ibv_rx_hash_fields).
13489  *
13490  * @param[in, out] action
13491  *   Shared RSS action holding hash RX queue objects.
13492  * @param[in] hash_fields
13493  *   Defines combination of packet fields to participate in RX hash.
13496  * @param[in] hrxq_idx
13497  *   Hash RX queue index to set.
13498  *
13499  * @return
13500  *   0 on success, -1 otherwise.
13501  */
13502 static int
13503 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13504                               const uint64_t hash_fields,
13505                               uint32_t hrxq_idx)
13506 {
13507         uint32_t *hrxqs = action->hrxq;
13508
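        /*
         * One hash RX queue slot is kept per hash type:
         * [0] IPv4, [1] IPv4/TCP, [2] IPv4/UDP, [3] IPv6,
         * [4] IPv6/TCP, [5] IPv6/UDP, [6] no hash (MLX5_RSS_HASH_NONE).
         */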
13509         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13510         case MLX5_RSS_HASH_IPV4:
13511                 /* fall-through. */
13512         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13513                 /* fall-through. */
13514         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13515                 hrxqs[0] = hrxq_idx;
13516                 return 0;
13517         case MLX5_RSS_HASH_IPV4_TCP:
13518                 /* fall-through. */
13519         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13520                 /* fall-through. */
13521         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13522                 hrxqs[1] = hrxq_idx;
13523                 return 0;
13524         case MLX5_RSS_HASH_IPV4_UDP:
13525                 /* fall-through. */
13526         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13527                 /* fall-through. */
13528         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13529                 hrxqs[2] = hrxq_idx;
13530                 return 0;
13531         case MLX5_RSS_HASH_IPV6:
13532                 /* fall-through. */
13533         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13534                 /* fall-through. */
13535         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13536                 hrxqs[3] = hrxq_idx;
13537                 return 0;
13538         case MLX5_RSS_HASH_IPV6_TCP:
13539                 /* fall-through. */
13540         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13541                 /* fall-through. */
13542         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13543                 hrxqs[4] = hrxq_idx;
13544                 return 0;
13545         case MLX5_RSS_HASH_IPV6_UDP:
13546                 /* fall-through. */
13547         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13548                 /* fall-through. */
13549         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13550                 hrxqs[5] = hrxq_idx;
13551                 return 0;
13552         case MLX5_RSS_HASH_NONE:
13553                 hrxqs[6] = hrxq_idx;
13554                 return 0;
13555         default:
13556                 return -1;
13557         }
13558 }
13559
13560 /**
13561  * Look up the hash RX queue by hash fields
13562  * (see enum ibv_rx_hash_fields).
13563  *
13564  * @param[in] dev
13565  *   Pointer to the Ethernet device structure.
13566  * @param[in] idx
13567  *   Shared RSS action ID holding hash RX queue objects.
13568  * @param[in] hash_fields
13569  *   Defines combination of packet fields to participate in RX hash.
13572  *
13573  * @return
13574  *   Valid hash RX queue index, otherwise 0.
13575  */
13576 static uint32_t
13577 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13578                                  const uint64_t hash_fields)
13579 {
13580         struct mlx5_priv *priv = dev->data->dev_private;
13581         struct mlx5_shared_action_rss *shared_rss =
13582             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13583         const uint32_t *hrxqs = shared_rss->hrxq;
13584
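        /* Same per-hash-type slot layout as __flow_dv_action_rss_hrxq_set(). */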
13585         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13586         case MLX5_RSS_HASH_IPV4:
13587                 /* fall-through. */
13588         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13589                 /* fall-through. */
13590         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13591                 return hrxqs[0];
13592         case MLX5_RSS_HASH_IPV4_TCP:
13593                 /* fall-through. */
13594         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13595                 /* fall-through. */
13596         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13597                 return hrxqs[1];
13598         case MLX5_RSS_HASH_IPV4_UDP:
13599                 /* fall-through. */
13600         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13601                 /* fall-through. */
13602         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13603                 return hrxqs[2];
13604         case MLX5_RSS_HASH_IPV6:
13605                 /* fall-through. */
13606         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13607                 /* fall-through. */
13608         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13609                 return hrxqs[3];
13610         case MLX5_RSS_HASH_IPV6_TCP:
13611                 /* fall-through. */
13612         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13613                 /* fall-through. */
13614         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13615                 return hrxqs[4];
13616         case MLX5_RSS_HASH_IPV6_UDP:
13617                 /* fall-through. */
13618         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13619                 /* fall-through. */
13620         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13621                 return hrxqs[5];
13622         case MLX5_RSS_HASH_NONE:
13623                 return hrxqs[6];
13624         default:
13625                 return 0;
13626         }
13628 }
13629
13630 /**
13631  * Apply the flow to the NIC, lock free
13632  * (the mutex should be acquired by the caller).
13633  *
13634  * @param[in] dev
13635  *   Pointer to the Ethernet device structure.
13636  * @param[in, out] flow
13637  *   Pointer to flow structure.
13638  * @param[out] error
13639  *   Pointer to error structure.
13640  *
13641  * @return
13642  *   0 on success, a negative errno value otherwise and rte_errno is set.
13643  */
13644 static int
13645 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13646               struct rte_flow_error *error)
13647 {
13648         struct mlx5_flow_dv_workspace *dv;
13649         struct mlx5_flow_handle *dh;
13650         struct mlx5_flow_handle_dv *dv_h;
13651         struct mlx5_flow *dev_flow;
13652         struct mlx5_priv *priv = dev->data->dev_private;
13653         uint32_t handle_idx;
13654         int n;
13655         int err;
13656         int idx;
13657         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13658         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13659         uint8_t misc_mask;
13660
13661         MLX5_ASSERT(wks);
13662         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13663                 dev_flow = &wks->flows[idx];
13664                 dv = &dev_flow->dv;
13665                 dh = dev_flow->handle;
13666                 dv_h = &dh->dvh;
13667                 n = dv->actions_n;
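                /*
                 * Append the DR action implementing this handle's fate
                 * (drop/queue/shared RSS/default miss) after the already
                 * translated actions.
                 */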
13668                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13669                         if (dv->transfer) {
13670                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13671                                 dv->actions[n++] = priv->sh->dr_drop_action;
13672                         } else {
13673 #ifdef HAVE_MLX5DV_DR
13674                                 /* DR supports drop action placeholder. */
13675                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13676                                 dv->actions[n++] = priv->sh->dr_drop_action;
13677 #else
13678                                 /* For DV we use the explicit drop queue. */
13679                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13680                                 dv->actions[n++] =
13681                                                 priv->drop_queue.hrxq->action;
13682 #endif
13683                         }
13684                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13685                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13686                         struct mlx5_hrxq *hrxq;
13687                         uint32_t hrxq_idx;
13688
13689                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13690                                                     &hrxq_idx);
13691                         if (!hrxq) {
13692                                 rte_flow_error_set
13693                                         (error, rte_errno,
13694                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13695                                          "cannot get hash queue");
13696                                 goto error;
13697                         }
13698                         dh->rix_hrxq = hrxq_idx;
13699                         dv->actions[n++] = hrxq->action;
13700                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13701                         struct mlx5_hrxq *hrxq = NULL;
13702                         uint32_t hrxq_idx;
13703
13704                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13705                                                 rss_desc->shared_rss,
13706                                                 dev_flow->hash_fields);
13707                         if (hrxq_idx)
13708                                 hrxq = mlx5_ipool_get
13709                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13710                                          hrxq_idx);
13711                         if (!hrxq) {
13712                                 rte_flow_error_set
13713                                         (error, rte_errno,
13714                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13715                                          "cannot get hash queue");
13716                                 goto error;
13717                         }
13718                         dh->rix_srss = rss_desc->shared_rss;
13719                         dv->actions[n++] = hrxq->action;
13720                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13721                         if (!priv->sh->default_miss_action) {
13722                                 rte_flow_error_set
13723                                         (error, rte_errno,
13724                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13725                                          "default miss action was not created");
13726                                 goto error;
13727                         }
13728                         dv->actions[n++] = priv->sh->default_miss_action;
13729                 }
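                /*
                 * Adjust the match value buffer size to the misc parameters
                 * actually enabled in the matcher before the flow is passed
                 * to the hardware.
                 */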
13730                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13731                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13732                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13733                                                (void *)&dv->value, n,
13734                                                dv->actions, &dh->drv_flow);
13735                 if (err) {
13736                         rte_flow_error_set
13737                                 (error, errno,
13738                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13739                                 NULL,
13740                                 (!priv->config.allow_duplicate_pattern &&
13741                                 errno == EEXIST) ?
13742                                 "duplicating pattern is not allowed" :
13743                                 "hardware refuses to create flow");
13744                         goto error;
13745                 }
13746                 if (priv->vmwa_context &&
13747                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13748                         /*
13749                          * The rule contains the VLAN pattern.
13750                          * For a VF we create a VLAN interface so
13751                          * that the hypervisor sets the correct
13752                          * e-Switch vport context.
13753                          */
13754                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13755                 }
13756         }
13757         return 0;
13758 error:
13759         err = rte_errno; /* Save rte_errno before cleanup. */
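        /* Roll back the resources taken by already applied handles. */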
13760         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13761                        handle_idx, dh, next) {
13762                 /* hrxq is a union; do not clear it if the flag is not set. */
13763                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13764                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13765                         dh->rix_hrxq = 0;
13766                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13767                         dh->rix_srss = 0;
13768                 }
13769                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13770                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13771         }
13772         rte_errno = err; /* Restore rte_errno. */
13773         return -rte_errno;
13774 }
13775
13776 void
13777 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
13778                           struct mlx5_list_entry *entry)
13779 {
13780         struct mlx5_flow_dv_matcher *resource = container_of(entry,
13781                                                              typeof(*resource),
13782                                                              entry);
13783
13784         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
13785         mlx5_free(resource);
13786 }
13787
13788 /**
13789  * Release the flow matcher.
13790  *
13791  * @param dev
13792  *   Pointer to Ethernet device.
13793  * @param handle
13794  *   Pointer to the mlx5_flow_handle holding the matcher.
13795  *
13796  * @return
13797  *   1 while a reference on it exists, 0 when freed.
13798  */
13799 static int
13800 flow_dv_matcher_release(struct rte_eth_dev *dev,
13801                         struct mlx5_flow_handle *handle)
13802 {
13803         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13804         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13805                                                             typeof(*tbl), tbl);
13806         int ret;
13807
13808         MLX5_ASSERT(matcher->matcher_object);
13809         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
13810         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13811         return ret;
13812 }
13813
13814 void
13815 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13816 {
13817         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13818         struct mlx5_flow_dv_encap_decap_resource *res =
13819                                        container_of(entry, typeof(*res), entry);
13820
13821         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13822         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13823 }
13824
13825 /**
13826  * Release an encap/decap resource.
13827  *
13828  * @param dev
13829  *   Pointer to Ethernet device.
13830  * @param encap_decap_idx
13831  *   Index of encap decap resource.
13832  *
13833  * @return
13834  *   1 while a reference on it exists, 0 when freed.
13835  */
13836 static int
13837 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13838                                      uint32_t encap_decap_idx)
13839 {
13840         struct mlx5_priv *priv = dev->data->dev_private;
13841         struct mlx5_flow_dv_encap_decap_resource *resource;
13842
13843         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13844                                   encap_decap_idx);
13845         if (!resource)
13846                 return 0;
13847         MLX5_ASSERT(resource->action);
13848         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
13849 }
13850
13851 /**
13852  * Release a jump-to-table action resource.
13853  *
13854  * @param dev
13855  *   Pointer to Ethernet device.
13856  * @param rix_jump
13857  *   Index to the jump action resource.
13858  *
13859  * @return
13860  *   1 while a reference on it exists, 0 when freed.
13861  */
13862 static int
13863 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13864                                   uint32_t rix_jump)
13865 {
13866         struct mlx5_priv *priv = dev->data->dev_private;
13867         struct mlx5_flow_tbl_data_entry *tbl_data;
13868
13869         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13870                                   rix_jump);
13871         if (!tbl_data)
13872                 return 0;
13873         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13874 }
13875
13876 void
13877 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13878 {
13879         struct mlx5_flow_dv_modify_hdr_resource *res =
13880                 container_of(entry, typeof(*res), entry);
13881         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13882
13883         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13884         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
13885 }
13886
13887 /**
13888  * Release a modify-header resource.
13889  *
13890  * @param dev
13891  *   Pointer to Ethernet device.
13892  * @param handle
13893  *   Pointer to mlx5_flow_handle.
13894  *
13895  * @return
13896  *   1 while a reference on it exists, 0 when freed.
13897  */
13898 static int
13899 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13900                                     struct mlx5_flow_handle *handle)
13901 {
13902         struct mlx5_priv *priv = dev->data->dev_private;
13903         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13904
13905         MLX5_ASSERT(entry->action);
13906         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13907 }
13908
13909 void
13910 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13911 {
13912         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13913         struct mlx5_flow_dv_port_id_action_resource *resource =
13914                                   container_of(entry, typeof(*resource), entry);
13915
13916         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13917         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
13918 }
13919
13920 /**
13921  * Release port ID action resource.
13922  *
13923  * @param dev
13924  *   Pointer to Ethernet device.
13925  * @param port_id
13926  *   Index to the port ID action resource.
13927  *
13928  * @return
13929  *   1 while a reference on it exists, 0 when freed.
13930  */
13931 static int
13932 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
13933                                         uint32_t port_id)
13934 {
13935         struct mlx5_priv *priv = dev->data->dev_private;
13936         struct mlx5_flow_dv_port_id_action_resource *resource;
13937
13938         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
13939         if (!resource)
13940                 return 0;
13941         MLX5_ASSERT(resource->action);
13942         return mlx5_list_unregister(priv->sh->port_id_action_list,
13943                                     &resource->entry);
13944 }
13945
13946 /**
13947  * Release shared RSS action resource.
13948  *
13949  * @param dev
13950  *   Pointer to Ethernet device.
13951  * @param srss
13952  *   Shared RSS action index.
13953  */
13954 static void
13955 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
13956 {
13957         struct mlx5_priv *priv = dev->data->dev_private;
13958         struct mlx5_shared_action_rss *shared_rss;
13959
13960         shared_rss = mlx5_ipool_get
13961                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
13962         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13963 }
13964
13965 void
13966 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13967 {
13968         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13969         struct mlx5_flow_dv_push_vlan_action_resource *resource =
13970                         container_of(entry, typeof(*resource), entry);
13971
13972         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13973         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
13974 }
13975
13976 /**
13977  * Release push vlan action resource.
13978  *
13979  * @param dev
13980  *   Pointer to Ethernet device.
13981  * @param handle
13982  *   Pointer to mlx5_flow_handle.
13983  *
13984  * @return
13985  *   1 while a reference on it exists, 0 when freed.
13986  */
13987 static int
13988 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
13989                                           struct mlx5_flow_handle *handle)
13990 {
13991         struct mlx5_priv *priv = dev->data->dev_private;
13992         struct mlx5_flow_dv_push_vlan_action_resource *resource;
13993         uint32_t idx = handle->dvh.rix_push_vlan;
13994
13995         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
13996         if (!resource)
13997                 return 0;
13998         MLX5_ASSERT(resource->action);
13999         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14000                                     &resource->entry);
14001 }
14002
14003 /**
14004  * Release the fate resource.
14005  *
14006  * @param dev
14007  *   Pointer to Ethernet device.
14008  * @param handle
14009  *   Pointer to mlx5_flow_handle.
14010  */
14011 static void
14012 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14013                                struct mlx5_flow_handle *handle)
14014 {
14015         if (!handle->rix_fate)
14016                 return;
14017         switch (handle->fate_action) {
14018         case MLX5_FLOW_FATE_QUEUE:
14019                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14020                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14021                 break;
14022         case MLX5_FLOW_FATE_JUMP:
14023                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14024                 break;
14025         case MLX5_FLOW_FATE_PORT_ID:
14026                 flow_dv_port_id_action_resource_release(dev,
14027                                 handle->rix_port_id_action);
14028                 break;
14029         default:
14030                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14031                 break;
14032         }
14033         handle->rix_fate = 0;
14034 }
14035
14036 void
14037 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14038                          struct mlx5_list_entry *entry)
14039 {
14040         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14041                                                               typeof(*resource),
14042                                                               entry);
14043         struct rte_eth_dev *dev = resource->dev;
14044         struct mlx5_priv *priv = dev->data->dev_private;
14045
14046         if (resource->verbs_action)
14047                 claim_zero(mlx5_flow_os_destroy_flow_action
14048                                                       (resource->verbs_action));
14049         if (resource->normal_path_tbl)
14050                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14051                                              resource->normal_path_tbl);
14052         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14053         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14054         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14055 }
14056
14057 /**
14058  * Release a sample resource.
14059  *
14060  * @param dev
14061  *   Pointer to Ethernet device.
14062  * @param handle
14063  *   Pointer to mlx5_flow_handle.
14064  *
14065  * @return
14066  *   1 while a reference on it exists, 0 when freed.
14067  */
14068 static int
14069 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14070                                      struct mlx5_flow_handle *handle)
14071 {
14072         struct mlx5_priv *priv = dev->data->dev_private;
14073         struct mlx5_flow_dv_sample_resource *resource;
14074
14075         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14076                                   handle->dvh.rix_sample);
14077         if (!resource)
14078                 return 0;
14079         MLX5_ASSERT(resource->verbs_action);
14080         return mlx5_list_unregister(priv->sh->sample_action_list,
14081                                     &resource->entry);
14082 }
14083
14084 void
14085 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14086                              struct mlx5_list_entry *entry)
14087 {
14088         struct mlx5_flow_dv_dest_array_resource *resource =
14089                         container_of(entry, typeof(*resource), entry);
14090         struct rte_eth_dev *dev = resource->dev;
14091         struct mlx5_priv *priv = dev->data->dev_private;
14092         uint32_t i = 0;
14093
14094         MLX5_ASSERT(resource->action);
14095         if (resource->action)
14096                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14097         for (; i < resource->num_of_dest; i++)
14098                 flow_dv_sample_sub_actions_release(dev,
14099                                                    &resource->sample_idx[i]);
14100         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14101         DRV_LOG(DEBUG, "destination array resource %p: removed",
14102                 (void *)resource);
14103 }
14104
14105 /**
14106  * Release a destination array resource.
14107  *
14108  * @param dev
14109  *   Pointer to Ethernet device.
14110  * @param handle
14111  *   Pointer to mlx5_flow_handle.
14112  *
14113  * @return
14114  *   1 while a reference on it exists, 0 when freed.
14115  */
14116 static int
14117 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14118                                     struct mlx5_flow_handle *handle)
14119 {
14120         struct mlx5_priv *priv = dev->data->dev_private;
14121         struct mlx5_flow_dv_dest_array_resource *resource;
14122
14123         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14124                                   handle->dvh.rix_dest_array);
14125         if (!resource)
14126                 return 0;
14127         MLX5_ASSERT(resource->action);
14128         return mlx5_list_unregister(priv->sh->dest_array_list,
14129                                     &resource->entry);
14130 }
14131
14132 static void
14133 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14134 {
14135         struct mlx5_priv *priv = dev->data->dev_private;
14136         struct mlx5_dev_ctx_shared *sh = priv->sh;
14137         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14138                                 sh->geneve_tlv_option_resource;
14139         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14140         if (geneve_opt_resource) {
14141                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14142                                          __ATOMIC_RELAXED))) {
14143                         claim_zero(mlx5_devx_cmd_destroy
14144                                         (geneve_opt_resource->obj));
14145                         mlx5_free(sh->geneve_tlv_option_resource);
14146                         sh->geneve_tlv_option_resource = NULL;
14147                 }
14148         }
14149         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14150 }
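
/*
 * Editor's note: the GENEVE TLV option resource above follows a
 * "last reference frees" pattern: the refcnt is decremented under the
 * spinlock and the DevX object is destroyed only when the count drops
 * to zero. A minimal sketch of the same pattern; the "example_" names
 * are illustrative only and not part of the driver:
 */
struct example_refcounted {
        uint32_t refcnt; /* Number of users holding a reference. */
        void *payload;   /* Object to destroy on the last release. */
};

static __rte_unused bool
example_refcounted_release(struct example_refcounted *res,
                           rte_spinlock_t *sl)
{
        bool freed = false;

        rte_spinlock_lock(sl);
        if (__atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED) == 0) {
                /* Last reference gone: destroy the payload here. */
                res->payload = NULL;
                freed = true;
        }
        rte_spinlock_unlock(sl);
        return freed;
}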
14151
14152 /**
14153  * Remove the flow from the NIC but keep it in memory.
14154  * Lock free, (mutex should be acquired by caller).
14155  *
14156  * @param[in] dev
14157  *   Pointer to Ethernet device.
14158  * @param[in, out] flow
14159  *   Pointer to flow structure.
14160  */
14161 static void
14162 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14163 {
14164         struct mlx5_flow_handle *dh;
14165         uint32_t handle_idx;
14166         struct mlx5_priv *priv = dev->data->dev_private;
14167
14168         if (!flow)
14169                 return;
14170         handle_idx = flow->dev_handles;
14171         while (handle_idx) {
14172                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14173                                     handle_idx);
14174                 if (!dh)
14175                         return;
14176                 if (dh->drv_flow) {
14177                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14178                         dh->drv_flow = NULL;
14179                 }
14180                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14181                         flow_dv_fate_resource_release(dev, dh);
14182                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14183                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14184                 handle_idx = dh->next.next;
14185         }
14186 }
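
/*
 * Editor's note: device handles are chained by ipool index rather than
 * by pointer: each handle stores the index of its successor in
 * next.next and the index 0 terminates the list, as walked by
 * flow_dv_remove() above. A minimal traversal sketch (the "example_"
 * name is hypothetical):
 */
static __rte_unused uint32_t
example_count_flow_handles(struct mlx5_priv *priv, uint32_t first_idx)
{
        struct mlx5_flow_handle *dh;
        uint32_t n = 0;

        while (first_idx) {
                dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
                                    first_idx);
                if (!dh)
                        break;
                n++;
                first_idx = dh->next.next;
        }
        return n;
}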
14187
14188 /**
14189  * Remove the flow from the NIC and the memory.
14190  * Lock free, (mutex should be acquired by caller).
14191  *
14192  * @param[in] dev
14193  *   Pointer to the Ethernet device structure.
14194  * @param[in, out] flow
14195  *   Pointer to flow structure.
14196  */
14197 static void
14198 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14199 {
14200         struct mlx5_flow_handle *dev_handle;
14201         struct mlx5_priv *priv = dev->data->dev_private;
14202         struct mlx5_flow_meter_info *fm = NULL;
14203         uint32_t srss = 0;
14204
14205         if (!flow)
14206                 return;
14207         flow_dv_remove(dev, flow);
14208         if (flow->counter) {
14209                 flow_dv_counter_free(dev, flow->counter);
14210                 flow->counter = 0;
14211         }
14212         if (flow->meter) {
14213                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14214                 if (fm)
14215                         mlx5_flow_meter_detach(priv, fm);
14216                 flow->meter = 0;
14217         }
14218         /* Keep the current age handling by default. */
14219         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14220                 flow_dv_aso_ct_release(dev, flow->ct);
14221         else if (flow->age)
14222                 flow_dv_aso_age_release(dev, flow->age);
14223         if (flow->geneve_tlv_option) {
14224                 flow_dv_geneve_tlv_option_resource_release(dev);
14225                 flow->geneve_tlv_option = 0;
14226         }
14227         while (flow->dev_handles) {
14228                 uint32_t tmp_idx = flow->dev_handles;
14229
14230                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14231                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14232                 if (!dev_handle)
14233                         return;
14234                 flow->dev_handles = dev_handle->next.next;
14235                 if (dev_handle->dvh.matcher)
14236                         flow_dv_matcher_release(dev, dev_handle);
14237                 if (dev_handle->dvh.rix_sample)
14238                         flow_dv_sample_resource_release(dev, dev_handle);
14239                 if (dev_handle->dvh.rix_dest_array)
14240                         flow_dv_dest_array_resource_release(dev, dev_handle);
14241                 if (dev_handle->dvh.rix_encap_decap)
14242                         flow_dv_encap_decap_resource_release(dev,
14243                                 dev_handle->dvh.rix_encap_decap);
14244                 if (dev_handle->dvh.modify_hdr)
14245                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14246                 if (dev_handle->dvh.rix_push_vlan)
14247                         flow_dv_push_vlan_action_resource_release(dev,
14248                                                                   dev_handle);
14249                 if (dev_handle->dvh.rix_tag)
14250                         flow_dv_tag_release(dev,
14251                                             dev_handle->dvh.rix_tag);
14252                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14253                         flow_dv_fate_resource_release(dev, dev_handle);
14254                 else if (!srss)
14255                         srss = dev_handle->rix_srss;
14256                 if (fm && dev_handle->is_meter_flow_id &&
14257                     dev_handle->split_flow_id)
14258                         mlx5_ipool_free(fm->flow_ipool,
14259                                         dev_handle->split_flow_id);
14260                 else if (dev_handle->split_flow_id &&
14261                     !dev_handle->is_meter_flow_id)
14262                         mlx5_ipool_free(priv->sh->ipool
14263                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14264                                         dev_handle->split_flow_id);
14265                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14266                            tmp_idx);
14267         }
14268         if (srss)
14269                 flow_dv_shared_rss_action_release(dev, srss);
14270 }
14271
14272 /**
14273  * Release array of hash RX queue objects.
14274  * Helper function.
14275  *
14276  * @param[in] dev
14277  *   Pointer to the Ethernet device structure.
14278  * @param[in, out] hrxqs
14279  *   Array of hash RX queue objects.
14280  *
14281  * @return
14282  *   Total number of references to hash RX queue objects in *hrxqs* array
14283  *   after this operation.
14284  */
14285 static int
14286 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14287                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14288 {
14289         size_t i;
14290         int remaining = 0;
14291
14292         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14293                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14294
14295                 if (!ret)
14296                         (*hrxqs)[i] = 0;
14297                 remaining += ret;
14298         }
14299         return remaining;
14300 }
14301
14302 /**
14303  * Release all hash RX queue objects representing shared RSS action.
14304  *
14305  * @param[in] dev
14306  *   Pointer to the Ethernet device structure.
14307  * @param[in, out] action
14308  *   Shared RSS action to remove hash RX queue objects from.
14309  *
14310  * @return
14311  *   Total number of references to hash RX queue objects stored in *action*
14312  *   after this operation.
14313  *   Expected to be 0 if no external references held.
14314  */
14315 static int
14316 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14317                                  struct mlx5_shared_action_rss *shared_rss)
14318 {
14319         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14320 }
14321
14322 /**
14323  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
14324  * user input.
14325  *
14326  * Only one hash value is available for each L3+L4 combination:
14327  * for example,
14328  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14329  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
14330  * the same slot in mlx5_rss_hash_fields.
14331  *
14332  * @param[in] rss
14333  *   Pointer to the shared action RSS conf.
14334  * @param[in, out] hash_field
14335  *   The hash_field value to be adjusted.
14336  *
14337  * @return
14338  *   void
14339  */
14340 static void
14341 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14342                                      uint64_t *hash_field)
14343 {
14344         uint64_t rss_types = rss->origin.types;
14345
14346         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14347         case MLX5_RSS_HASH_IPV4:
14348                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14349                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14350                         if (rss_types & ETH_RSS_L3_DST_ONLY)
14351                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14352                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
14353                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14354                         else
14355                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14356                 }
14357                 return;
14358         case MLX5_RSS_HASH_IPV6:
14359                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14360                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14361                         if (rss_types & ETH_RSS_L3_DST_ONLY)
14362                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14363                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
14364                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14365                         else
14366                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14367                 }
14368                 return;
14369         case MLX5_RSS_HASH_IPV4_UDP:
14370                 /* fall-through. */
14371         case MLX5_RSS_HASH_IPV6_UDP:
14372                 if (rss_types & ETH_RSS_UDP) {
14373                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14374                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14375                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14376                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14377                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14378                         else
14379                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14380                 }
14381                 return;
14382         case MLX5_RSS_HASH_IPV4_TCP:
14383                 /* fall-through. */
14384         case MLX5_RSS_HASH_IPV6_TCP:
14385                 if (rss_types & ETH_RSS_TCP) {
14386                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14387                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14388                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14389                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14390                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14391                         else
14392                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14393                 }
14394                 return;
14395         default:
14396                 return;
14397         }
14398 }
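
/*
 * Editor's example: with an IPv4 layer type and ETH_RSS_L3_SRC_ONLY set
 * in the shared RSS types, the generic MLX5_RSS_HASH_IPV4 value
 * (source + destination) is narrowed down to the source address only.
 * A hypothetical check, assuming "rss" was created with such types:
 */
static __rte_unused void
example_l34_hash_adjust_check(struct mlx5_shared_action_rss *rss)
{
        uint64_t hash_fields = MLX5_RSS_HASH_IPV4;

        __flow_dv_action_rss_l34_hash_adjust(rss, &hash_fields);
        /* Holds only under the assumption stated above. */
        MLX5_ASSERT(hash_fields == IBV_RX_HASH_SRC_IPV4);
}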
14399
14400 /**
14401  * Setup shared RSS action.
14402  * Prepare a set of hash RX queue objects sufficient to handle all valid
14403  * hash_fields combinations (see enum ibv_rx_hash_fields).
14404  *
14405  * @param[in] dev
14406  *   Pointer to the Ethernet device structure.
14407  * @param[in] action_idx
14408  *   Shared RSS action ipool index.
14409  * @param[in, out] action
14410  *   Partially initialized shared RSS action.
14411  * @param[out] error
14412  *   Perform verbose error reporting if not NULL. Initialized in case of
14413  *   error only.
14414  *
14415  * @return
14416  *   0 on success, otherwise negative errno value.
14417  */
14418 static int
14419 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14420                            uint32_t action_idx,
14421                            struct mlx5_shared_action_rss *shared_rss,
14422                            struct rte_flow_error *error)
14423 {
14424         struct mlx5_flow_rss_desc rss_desc = { 0 };
14425         size_t i;
14426         int err;
14427
14428         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14429                 return rte_flow_error_set(error, rte_errno,
14430                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14431                                           "cannot setup indirection table");
14432         }
14433         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14434         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14435         rss_desc.const_q = shared_rss->origin.queue;
14436         rss_desc.queue_num = shared_rss->origin.queue_num;
14437         /* Set non-zero value to indicate a shared RSS. */
14438         rss_desc.shared_rss = action_idx;
14439         rss_desc.ind_tbl = shared_rss->ind_tbl;
14440         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14441                 uint32_t hrxq_idx;
14442                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14443                 int tunnel = 0;
14444
14445                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14446                 if (shared_rss->origin.level > 1) {
14447                         hash_fields |= IBV_RX_HASH_INNER;
14448                         tunnel = 1;
14449                 }
14450                 rss_desc.tunnel = tunnel;
14451                 rss_desc.hash_fields = hash_fields;
14452                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14453                 if (!hrxq_idx) {
14454                         rte_flow_error_set
14455                                 (error, rte_errno,
14456                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14457                                  "cannot get hash queue");
14458                         goto error_hrxq_new;
14459                 }
14460                 err = __flow_dv_action_rss_hrxq_set
14461                         (shared_rss, hash_fields, hrxq_idx);
14462                 MLX5_ASSERT(!err);
14463         }
14464         return 0;
14465 error_hrxq_new:
14466         err = rte_errno;
14467         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14468         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14469                 shared_rss->ind_tbl = NULL;
14470         rte_errno = err;
14471         return -rte_errno;
14472 }
14473
14474 /**
14475  * Create shared RSS action.
14476  *
14477  * @param[in] dev
14478  *   Pointer to the Ethernet device structure.
14479  * @param[in] conf
14480  *   Shared action configuration.
14481  * @param[in] rss
14482  *   RSS action specification used to create shared action.
14483  * @param[out] error
14484  *   Perform verbose error reporting if not NULL. Initialized in case of
14485  *   error only.
14486  *
14487  * @return
14488  *   A valid shared action ID in case of success, 0 otherwise and
14489  *   rte_errno is set.
14490  */
14491 static uint32_t
14492 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14493                             const struct rte_flow_indir_action_conf *conf,
14494                             const struct rte_flow_action_rss *rss,
14495                             struct rte_flow_error *error)
14496 {
14497         struct mlx5_priv *priv = dev->data->dev_private;
14498         struct mlx5_shared_action_rss *shared_rss = NULL;
14499         void *queue = NULL;
14500         struct rte_flow_action_rss *origin;
14501         const uint8_t *rss_key;
14502         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14503         uint32_t idx;
14504
14505         RTE_SET_USED(conf);
14506         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14507                             0, SOCKET_ID_ANY);
14508         shared_rss = mlx5_ipool_zmalloc
14509                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14510         if (!shared_rss || !queue) {
14511                 rte_flow_error_set(error, ENOMEM,
14512                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14513                                    "cannot allocate resource memory");
14514                 goto error_rss_init;
14515         }
14516         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14517                 rte_flow_error_set(error, E2BIG,
14518                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14519                                    "rss action number out of range");
14520                 goto error_rss_init;
14521         }
14522         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14523                                           sizeof(*shared_rss->ind_tbl),
14524                                           0, SOCKET_ID_ANY);
14525         if (!shared_rss->ind_tbl) {
14526                 rte_flow_error_set(error, ENOMEM,
14527                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14528                                    "cannot allocate resource memory");
14529                 goto error_rss_init;
14530         }
14531         memcpy(queue, rss->queue, queue_size);
14532         shared_rss->ind_tbl->queues = queue;
14533         shared_rss->ind_tbl->queues_n = rss->queue_num;
14534         origin = &shared_rss->origin;
14535         origin->func = rss->func;
14536         origin->level = rss->level;
14537         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14538         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14539         /* NULL RSS key indicates default RSS key. */
14540         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14541         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14542         origin->key = &shared_rss->key[0];
14543         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14544         origin->queue = queue;
14545         origin->queue_num = rss->queue_num;
14546         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14547                 goto error_rss_init;
14548         rte_spinlock_init(&shared_rss->action_rss_sl);
14549         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14550         rte_spinlock_lock(&priv->shared_act_sl);
14551         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14552                      &priv->rss_shared_actions, idx, shared_rss, next);
14553         rte_spinlock_unlock(&priv->shared_act_sl);
14554         return idx;
14555 error_rss_init:
14556         if (shared_rss) {
14557                 if (shared_rss->ind_tbl)
14558                         mlx5_free(shared_rss->ind_tbl);
14559                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14560                                 idx);
14561         }
14562         if (queue)
14563                 mlx5_free(queue);
14564         return 0;
14565 }
14566
14567 /**
14568  * Destroy the shared RSS action.
14569  * Release related hash RX queue objects.
14570  *
14571  * @param[in] dev
14572  *   Pointer to the Ethernet device structure.
14573  * @param[in] idx
14574  *   The shared RSS action object ID to be removed.
14575  * @param[out] error
14576  *   Perform verbose error reporting if not NULL. Initialized in case of
14577  *   error only.
14578  *
14579  * @return
14580  *   0 on success, otherwise negative errno value.
14581  */
14582 static int
14583 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14584                              struct rte_flow_error *error)
14585 {
14586         struct mlx5_priv *priv = dev->data->dev_private;
14587         struct mlx5_shared_action_rss *shared_rss =
14588             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14589         uint32_t old_refcnt = 1;
14590         int remaining;
14591         uint16_t *queue = NULL;
14592
14593         if (!shared_rss)
14594                 return rte_flow_error_set(error, EINVAL,
14595                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14596                                           "invalid shared action");
14597         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14598         if (remaining)
14599                 return rte_flow_error_set(error, EBUSY,
14600                                           RTE_FLOW_ERROR_TYPE_ACTION,
14601                                           NULL,
14602                                           "shared rss hrxq has references");
14603         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14604                                          0, 0, __ATOMIC_ACQUIRE,
14605                                          __ATOMIC_RELAXED))
14606                 return rte_flow_error_set(error, EBUSY,
14607                                           RTE_FLOW_ERROR_TYPE_ACTION,
14608                                           NULL,
14609                                           "shared rss has references");
14610         queue = shared_rss->ind_tbl->queues;
14611         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14612         if (remaining)
14613                 return rte_flow_error_set(error, EBUSY,
14614                                           RTE_FLOW_ERROR_TYPE_ACTION,
14615                                           NULL,
14616                                           "shared rss indirection table has"
14617                                           " references");
14618         mlx5_free(queue);
14619         rte_spinlock_lock(&priv->shared_act_sl);
14620         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14621                      &priv->rss_shared_actions, idx, shared_rss, next);
14622         rte_spinlock_unlock(&priv->shared_act_sl);
14623         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14624                         idx);
14625         return 0;
14626 }
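
/*
 * Editor's note: __flow_dv_action_rss_release() only proceeds when it
 * can atomically move the reference count from exactly 1 (the creator's
 * reference) to 0; any concurrent user makes the compare-exchange fail
 * and the call returns EBUSY. A minimal sketch of that gate (the
 * "example_" name is hypothetical):
 */
static __rte_unused bool
example_try_final_release(uint32_t *refcnt)
{
        uint32_t expected = 1;

        return __atomic_compare_exchange_n(refcnt, &expected, 0,
                                           false, __ATOMIC_ACQUIRE,
                                           __ATOMIC_RELAXED);
}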
14627
14628 /**
14629  * Create indirect action, lock free,
14630  * (mutex should be acquired by caller).
14631  * Dispatcher for action type specific call.
14632  *
14633  * @param[in] dev
14634  *   Pointer to the Ethernet device structure.
14635  * @param[in] conf
14636  *   Shared action configuration.
14637  * @param[in] action
14638  *   Action specification used to create indirect action.
14639  * @param[out] error
14640  *   Perform verbose error reporting if not NULL. Initialized in case of
14641  *   error only.
14642  *
14643  * @return
14644  *   A valid shared action handle in case of success, NULL otherwise and
14645  *   rte_errno is set.
14646  */
14647 static struct rte_flow_action_handle *
14648 flow_dv_action_create(struct rte_eth_dev *dev,
14649                       const struct rte_flow_indir_action_conf *conf,
14650                       const struct rte_flow_action *action,
14651                       struct rte_flow_error *err)
14652 {
14653         struct mlx5_priv *priv = dev->data->dev_private;
14654         uint32_t age_idx = 0;
14655         uint32_t idx = 0;
14656         uint32_t ret = 0;
14657
14658         switch (action->type) {
14659         case RTE_FLOW_ACTION_TYPE_RSS:
14660                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14661                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14662                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14663                 break;
14664         case RTE_FLOW_ACTION_TYPE_AGE:
14665                 age_idx = flow_dv_aso_age_alloc(dev, err);
14666                 if (!age_idx) {
14667                         ret = -rte_errno;
14668                         break;
14669                 }
14670                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14671                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14672                 flow_dv_aso_age_params_init(dev, age_idx,
14673                                         ((const struct rte_flow_action_age *)
14674                                                 action->conf)->context ?
14675                                         ((const struct rte_flow_action_age *)
14676                                                 action->conf)->context :
14677                                         (void *)(uintptr_t)idx,
14678                                         ((const struct rte_flow_action_age *)
14679                                                 action->conf)->timeout);
14680                 ret = age_idx;
14681                 break;
14682         case RTE_FLOW_ACTION_TYPE_COUNT:
14683                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14684                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14685                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14686                 break;
14687         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14688                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14689                                                          err);
14690                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14691                 break;
14692         default:
14693                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14694                                    NULL, "action type not supported");
14695                 break;
14696         }
14697         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14698 }
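
/*
 * Editor's note: the indirect action handle returned above is not a
 * pointer but a 32-bit cookie: the action type sits in the bits at and
 * above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the per-type object index
 * below it (CT handles also embed the owner port, see
 * MLX5_INDIRECT_ACT_CT_GEN_IDX). A hypothetical encoder sketch:
 */
static __rte_unused struct rte_flow_action_handle *
example_encode_indirect_handle(uint32_t type, uint32_t obj_idx)
{
        uint32_t idx = (type << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | obj_idx;

        return (struct rte_flow_action_handle *)(uintptr_t)idx;
}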
14699
14700 /**
14701  * Destroy the indirect action.
14702  * Release action related resources on the NIC and the memory.
14703  * Lock free, (mutex should be acquired by caller).
14704  * Dispatcher for action type specific call.
14705  *
14706  * @param[in] dev
14707  *   Pointer to the Ethernet device structure.
14708  * @param[in] handle
14709  *   The indirect action object handle to be removed.
14710  * @param[out] error
14711  *   Perform verbose error reporting if not NULL. Initialized in case of
14712  *   error only.
14713  *
14714  * @return
14715  *   0 on success, otherwise negative errno value.
14716  */
14717 static int
14718 flow_dv_action_destroy(struct rte_eth_dev *dev,
14719                        struct rte_flow_action_handle *handle,
14720                        struct rte_flow_error *error)
14721 {
14722         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14723         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14724         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14725         struct mlx5_flow_counter *cnt;
14726         uint32_t no_flow_refcnt = 1;
14727         int ret;
14728
14729         switch (type) {
14730         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14731                 return __flow_dv_action_rss_release(dev, idx, error);
14732         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14733                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14734                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14735                                                  &no_flow_refcnt, 1, false,
14736                                                  __ATOMIC_ACQUIRE,
14737                                                  __ATOMIC_RELAXED))
14738                         return rte_flow_error_set(error, EBUSY,
14739                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14740                                                   NULL,
14741                                                   "Indirect count action has references");
14742                 flow_dv_counter_free(dev, idx);
14743                 return 0;
14744         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14745                 ret = flow_dv_aso_age_release(dev, idx);
14746                 if (ret)
14747                         /*
14748                          * In this case, the last flow holding a reference
14749                          * will actually release the age action.
14750                          */
14751                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14752                                 " released with references %d.", idx, ret);
14753                 return 0;
14754         case MLX5_INDIRECT_ACTION_TYPE_CT:
14755                 ret = flow_dv_aso_ct_release(dev, idx);
14756                 if (ret < 0)
14757                         return ret;
14758                 if (ret > 0)
14759                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14760                                 "has references %d.", idx, ret);
14761                 return 0;
14762         default:
14763                 return rte_flow_error_set(error, ENOTSUP,
14764                                           RTE_FLOW_ERROR_TYPE_ACTION,
14765                                           NULL,
14766                                           "action type not supported");
14767         }
14768 }
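
/*
 * Editor's note: the decoding mirror of the cookie layout produced by
 * flow_dv_action_create(); a hypothetical sketch of the split performed
 * at the top of flow_dv_action_destroy() above:
 */
static __rte_unused void
example_decode_indirect_handle(const struct rte_flow_action_handle *handle,
                               uint32_t *type, uint32_t *obj_idx)
{
        uint32_t act_idx = (uint32_t)(uintptr_t)handle;

        *type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
        *obj_idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
}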
14769
14770 /**
14771  * Update the shared RSS action configuration in place.
14772  *
14773  * @param[in] dev
14774  *   Pointer to the Ethernet device structure.
14775  * @param[in] idx
14776  *   The shared RSS action object ID to be updated.
14777  * @param[in] action_conf
14778  *   RSS action specification used to modify *shared_rss*.
14779  * @param[out] error
14780  *   Perform verbose error reporting if not NULL. Initialized in case of
14781  *   error only.
14782  *
14783  * @return
14784  *   0 on success, otherwise negative errno value.
14785  * @note Currently only the update of RSS queues is supported.
14786  */
14787 static int
14788 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14789                             const struct rte_flow_action_rss *action_conf,
14790                             struct rte_flow_error *error)
14791 {
14792         struct mlx5_priv *priv = dev->data->dev_private;
14793         struct mlx5_shared_action_rss *shared_rss =
14794             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14795         int ret = 0;
14796         void *queue = NULL;
14797         uint16_t *queue_old = NULL;
14798         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
14799
14800         if (!shared_rss)
14801                 return rte_flow_error_set(error, EINVAL,
14802                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14803                                           "invalid shared action to update");
14804         if (priv->obj_ops.ind_table_modify == NULL)
14805                 return rte_flow_error_set(error, ENOTSUP,
14806                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14807                                           "cannot modify indirection table");
14808         queue = mlx5_malloc(MLX5_MEM_ZERO,
14809                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14810                             0, SOCKET_ID_ANY);
14811         if (!queue)
14812                 return rte_flow_error_set(error, ENOMEM,
14813                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14814                                           NULL,
14815                                           "cannot allocate resource memory");
14816         memcpy(queue, action_conf->queue, queue_size);
14817         MLX5_ASSERT(shared_rss->ind_tbl);
14818         rte_spinlock_lock(&shared_rss->action_rss_sl);
14819         queue_old = shared_rss->ind_tbl->queues;
14820         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
14821                                         queue, action_conf->queue_num, true);
14822         if (ret) {
14823                 mlx5_free(queue);
14824                 ret = rte_flow_error_set(error, rte_errno,
14825                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14826                                           "cannot update indirection table");
14827         } else {
14828                 mlx5_free(queue_old);
14829                 shared_rss->origin.queue = queue;
14830                 shared_rss->origin.queue_num = action_conf->queue_num;
14831         }
14832         rte_spinlock_unlock(&shared_rss->action_rss_sl);
14833         return ret;
14834 }
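
/*
 * Editor's usage sketch: at the rte_flow API level only the queue set
 * of an indirect RSS action can be updated in place (see the @note
 * above). A hypothetical application-side helper, assuming "handle" was
 * returned by rte_flow_action_handle_create() for an RSS action:
 */
static __rte_unused int
example_update_rss_queues(uint16_t port_id,
                          struct rte_flow_action_handle *handle,
                          const uint16_t *queues, uint32_t queue_num,
                          struct rte_flow_error *error)
{
        struct rte_flow_action_rss conf = {
                .queue = queues,
                .queue_num = queue_num,
        };
        struct rte_flow_action update = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &conf,
        };

        return rte_flow_action_handle_update(port_id, handle, &update, error);
}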
14835
14836 /**
14837  * Update the conntrack context or direction in place.
14838  * Context update should be synchronized.
14839  *
14840  * @param[in] dev
14841  *   Pointer to the Ethernet device structure.
14842  * @param[in] idx
14843  *   The conntrack object ID to be updated.
14844  * @param[in] update
14845  *   Pointer to the structure of information to update.
14846  * @param[out] error
14847  *   Perform verbose error reporting if not NULL. Initialized in case of
14848  *   error only.
14849  *
14850  * @return
14851  *   0 on success, otherwise negative errno value.
14852  */
14853 static int
14854 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14855                            const struct rte_flow_modify_conntrack *update,
14856                            struct rte_flow_error *error)
14857 {
14858         struct mlx5_priv *priv = dev->data->dev_private;
14859         struct mlx5_aso_ct_action *ct;
14860         const struct rte_flow_action_conntrack *new_prf;
14861         int ret = 0;
14862         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14863         uint32_t dev_idx;
14864
14865         if (PORT_ID(priv) != owner)
14866                 return rte_flow_error_set(error, EACCES,
14867                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14868                                           NULL,
14869                                           "CT object owned by another port");
14870         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14871         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14872         if (!ct->refcnt)
14873                 return rte_flow_error_set(error, ENOMEM,
14874                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14875                                           NULL,
14876                                           "CT object is inactive");
14877         new_prf = &update->new_ct;
14878         if (update->direction)
14879                 ct->is_original = !!new_prf->is_original_dir;
14880         if (update->state) {
14881                 /* Only validate the profile when it needs to be updated. */
14882                 ret = mlx5_validate_action_ct(dev, new_prf, error);
14883                 if (ret)
14884                         return ret;
14885                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14886                 if (ret)
14887                         return rte_flow_error_set(error, EIO,
14888                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14889                                         NULL,
14890                                         "Failed to send CT context update WQE");
14891                 /* Block until ready or a failure. */
14892                 ret = mlx5_aso_ct_available(priv->sh, ct);
14893                 if (ret)
14894                         rte_flow_error_set(error, rte_errno,
14895                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14896                                            NULL,
14897                                            "Timeout to get the CT update");
14898         }
14899         return ret;
14900 }
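
/*
 * Editor's usage sketch: a hypothetical application-side update that
 * flips only the conntrack direction bit while leaving the context
 * untouched, matching the update->direction / update->state split
 * handled above:
 */
static __rte_unused int
example_update_ct_direction(uint16_t port_id,
                            struct rte_flow_action_handle *handle,
                            bool original_dir,
                            struct rte_flow_error *error)
{
        struct rte_flow_modify_conntrack update = {
                .new_ct.is_original_dir = original_dir,
                .direction = 1, /* Update the direction... */
                .state = 0,     /* ...but keep the CT context. */
        };

        return rte_flow_action_handle_update(port_id, handle, &update, error);
}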
14901
14902 /**
14903  * Update the shared action configuration in place, lock free,
14904  * (mutex should be acquired by caller).
14905  *
14906  * @param[in] dev
14907  *   Pointer to the Ethernet device structure.
14908  * @param[in] handle
14909  *   The indirect action object handle to be updated.
14910  * @param[in] update
14911  *   Action specification used to modify the action pointed by *handle*.
14912  *   *update* may be of the same type as the action pointed to by the
14913  *   *handle* argument, or some other structure like a wrapper, depending on
14914  *   the indirect action type.
14915  * @param[out] error
14916  *   Perform verbose error reporting if not NULL. Initialized in case of
14917  *   error only.
14918  *
14919  * @return
14920  *   0 on success, otherwise negative errno value.
14921  */
14922 static int
14923 flow_dv_action_update(struct rte_eth_dev *dev,
14924                         struct rte_flow_action_handle *handle,
14925                         const void *update,
14926                         struct rte_flow_error *err)
14927 {
14928         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14929         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14930         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14931         const void *action_conf;
14932
14933         switch (type) {
14934         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14935                 action_conf = ((const struct rte_flow_action *)update)->conf;
14936                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14937         case MLX5_INDIRECT_ACTION_TYPE_CT:
14938                 return __flow_dv_action_ct_update(dev, idx, update, err);
14939         default:
14940                 return rte_flow_error_set(err, ENOTSUP,
14941                                           RTE_FLOW_ERROR_TYPE_ACTION,
14942                                           NULL,
14943                                           "action type update not supported");
14944         }
14945 }
14946
14947 /**
14948  * Destroy the meter sub policy table rules.
14949  * Lock free, (mutex should be acquired by caller).
14950  *
14951  * @param[in] dev
14952  *   Pointer to Ethernet device.
14953  * @param[in] sub_policy
14954  *   Pointer to meter sub policy table.
14955  */
14956 static void
14957 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
14958                              struct mlx5_flow_meter_sub_policy *sub_policy)
14959 {
14960         struct mlx5_priv *priv = dev->data->dev_private;
14961         struct mlx5_flow_tbl_data_entry *tbl;
14962         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
14963         struct mlx5_flow_meter_info *next_fm;
14964         struct mlx5_sub_policy_color_rule *color_rule;
14965         void *tmp;
14966         uint32_t i;
14967
14968         for (i = 0; i < RTE_COLORS; i++) {
14969                 next_fm = NULL;
14970                 if (i == RTE_COLOR_GREEN && policy &&
14971                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
14972                         next_fm = mlx5_flow_meter_find(priv,
14973                                         policy->act_cnt[i].next_mtr_id, NULL);
14974                 TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
14975                                    next_port, tmp) {
14976                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
14977                         tbl = container_of(color_rule->matcher->tbl,
14978                                         typeof(*tbl), tbl);
14979                         mlx5_list_unregister(tbl->matchers,
14980                                                 &color_rule->matcher->entry);
14981                         TAILQ_REMOVE(&sub_policy->color_rules[i],
14982                                         color_rule, next_port);
14983                         mlx5_free(color_rule);
14984                         if (next_fm)
14985                                 mlx5_flow_meter_detach(priv, next_fm);
14986                 }
14987         }
14988         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14989                 if (sub_policy->rix_hrxq[i]) {
14990                         if (policy && !policy->is_hierarchy)
14991                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
14992                         sub_policy->rix_hrxq[i] = 0;
14993                 }
14994                 if (sub_policy->jump_tbl[i]) {
14995                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14996                         sub_policy->jump_tbl[i]);
14997                         sub_policy->jump_tbl[i] = NULL;
14998                 }
14999         }
15000         if (sub_policy->tbl_rsc) {
15001                 flow_dv_tbl_resource_release(MLX5_SH(dev),
15002                         sub_policy->tbl_rsc);
15003                 sub_policy->tbl_rsc = NULL;
15004         }
15005 }
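
/*
 * Editor's note: TAILQ_FOREACH_SAFE keeps a lookahead pointer to the
 * next element, which is what allows the loop above to unlink and free
 * the current color rule while iterating. A self-contained sketch with
 * a hypothetical list type (not part of the driver):
 */
struct example_node {
        TAILQ_ENTRY(example_node) next;
};
TAILQ_HEAD(example_node_list, example_node);

static __rte_unused void
example_drain_list(struct example_node_list *head)
{
        struct example_node *node;
        void *tmp;

        TAILQ_FOREACH_SAFE(node, head, next, tmp) {
                TAILQ_REMOVE(head, node, next);
                mlx5_free(node);
        }
}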
15006
15007 /**
15008  * Destroy policy rules, lock free,
15009  * (mutex should be acquired by caller).
15010  * Iterates over all sub-policies of all meter domains.
15011  *
15012  * @param[in] dev
15013  *   Pointer to the Ethernet device structure.
15014  * @param[in] mtr_policy
15015  *   Meter policy struct.
15016  */
15017 static void
15018 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15019                       struct mlx5_flow_meter_policy *mtr_policy)
15020 {
15021         uint32_t i, j;
15022         struct mlx5_flow_meter_sub_policy *sub_policy;
15023         uint16_t sub_policy_num;
15024
15025         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15026                 sub_policy_num = (mtr_policy->sub_policy_num >>
15027                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15028                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15029                 for (j = 0; j < sub_policy_num; j++) {
15030                         sub_policy = mtr_policy->sub_policys[i][j];
15031                         if (sub_policy)
15032                                 __flow_dv_destroy_sub_policy_rules
15033                                                 (dev, sub_policy);
15034                 }
15035         }
15036 }
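
/*
 * Editor's note: mtr_policy->sub_policy_num packs one small counter per
 * meter domain into a single word: the field for domain i sits at bit
 * offset MLX5_MTR_SUB_POLICY_NUM_SHIFT * i and is extracted with
 * MLX5_MTR_SUB_POLICY_NUM_MASK, as unpacked in the loop above. A
 * hypothetical extractor:
 */
static __rte_unused uint16_t
example_sub_policy_num(const struct mlx5_flow_meter_policy *mtr_policy,
                       uint32_t domain)
{
        return (mtr_policy->sub_policy_num >>
                (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
                MLX5_MTR_SUB_POLICY_NUM_MASK;
}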
15037
15038 /**
15039  * Destroy policy action, lock free,
15040  * (mutex should be acquired by caller).
15041  * Dispatcher for action type specific call.
15042  *
15043  * @param[in] dev
15044  *   Pointer to the Ethernet device structure.
15045  * @param[in] mtr_policy
15046  *   Meter policy struct.
15047  */
15048 static void
15049 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15050                       struct mlx5_flow_meter_policy *mtr_policy)
15051 {
15052         struct rte_flow_action *rss_action;
15053         struct mlx5_flow_handle dev_handle;
15054         uint32_t i, j;
15055
15056         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15057                 if (mtr_policy->act_cnt[i].rix_mark) {
15058                         flow_dv_tag_release(dev,
15059                                 mtr_policy->act_cnt[i].rix_mark);
15060                         mtr_policy->act_cnt[i].rix_mark = 0;
15061                 }
15062                 if (mtr_policy->act_cnt[i].modify_hdr) {
15063                         dev_handle.dvh.modify_hdr =
15064                                 mtr_policy->act_cnt[i].modify_hdr;
15065                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15066                 }
15067                 switch (mtr_policy->act_cnt[i].fate_action) {
15068                 case MLX5_FLOW_FATE_SHARED_RSS:
15069                         rss_action = mtr_policy->act_cnt[i].rss;
15070                         mlx5_free(rss_action);
15071                         break;
15072                 case MLX5_FLOW_FATE_PORT_ID:
15073                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15074                                 flow_dv_port_id_action_resource_release(dev,
15075                                 mtr_policy->act_cnt[i].rix_port_id_action);
15076                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15077                         }
15078                         break;
15079                 case MLX5_FLOW_FATE_DROP:
15080                 case MLX5_FLOW_FATE_JUMP:
15081                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15082                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15083                                                 NULL;
15084                         break;
15085                 default:
15086                         /* Queue action: do nothing. */
15087                         break;
15088                 }
15089         }
15090         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15091                 mtr_policy->dr_drop_action[j] = NULL;
15092 }
15093
15094 /**
15095  * Create policy action per domain, lock free,
15096  * (mutex should be acquired by caller).
15097  * Dispatcher for action type specific call.
15098  *
15099  * @param[in] dev
15100  *   Pointer to the Ethernet device structure.
15101  * @param[in] mtr_policy
15102  *   Meter policy struct.
15103  * @param[in] action
15104  *   Action specification used to create meter actions.
15105  * @param[out] error
15106  *   Perform verbose error reporting if not NULL. Initialized in case of
15107  *   error only.
15108  *
15109  * @return
15110  *   0 on success, otherwise negative errno value.
15111  */
15112 static int
15113 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15114                         struct mlx5_flow_meter_policy *mtr_policy,
15115                         const struct rte_flow_action *actions[RTE_COLORS],
15116                         enum mlx5_meter_domain domain,
15117                         struct rte_mtr_error *error)
15118 {
15119         struct mlx5_priv *priv = dev->data->dev_private;
15120         struct rte_flow_error flow_err;
15121         const struct rte_flow_action *act;
15122         uint64_t action_flags = 0;
15123         struct mlx5_flow_handle dh;
15124         struct mlx5_flow dev_flow;
15125         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15126         int i, ret;
15127         uint8_t egress, transfer;
15128         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15129         union {
15130                 struct mlx5_flow_dv_modify_hdr_resource res;
15131                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15132                             sizeof(struct mlx5_modification_cmd) *
15133                             (MLX5_MAX_MODIFY_NUM + 1)];
15134         } mhdr_dummy;
15135         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15136
15137         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15138         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15139         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15140         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15141         memset(&port_id_action, 0,
15142                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
15143         memset(mhdr_res, 0, sizeof(*mhdr_res));
15144         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15145                                         egress ?
15146                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15147                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
15148         dev_flow.handle = &dh;
15149         dev_flow.dv.port_id_action = &port_id_action;
15150         dev_flow.external = true;
15151         for (i = 0; i < RTE_COLORS; i++) {
15152                 if (i < MLX5_MTR_RTE_COLORS)
15153                         act_cnt = &mtr_policy->act_cnt[i];
15154                 for (act = actions[i];
15155                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
15156                         act++) {
15157                         switch (act->type) {
15158                         case RTE_FLOW_ACTION_TYPE_MARK:
15159                         {
15160                                 uint32_t tag_be = mlx5_flow_mark_set
15161                                         (((const struct rte_flow_action_mark *)
15162                                         (act->conf))->id);
15163
15164                                 if (i >= MLX5_MTR_RTE_COLORS)
15165                                         return -rte_mtr_error_set(error,
15166                                           ENOTSUP,
15167                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15168                                           NULL,
15169                                           "cannot create policy "
15170                                           "mark action for this color");
15171                                 dev_flow.handle->mark = 1;
15172                                 if (flow_dv_tag_resource_register(dev, tag_be,
15173                                                   &dev_flow, &flow_err))
15174                                         return -rte_mtr_error_set(error,
15175                                         ENOTSUP,
15176                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15177                                         NULL,
15178                                         "cannot setup policy mark action");
15179                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15180                                 act_cnt->rix_mark =
15181                                         dev_flow.handle->dvh.rix_tag;
15182                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15183                                 break;
15184                         }
15185                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15186                                 if (i >= MLX5_MTR_RTE_COLORS)
15187                                         return -rte_mtr_error_set(error,
15188                                           ENOTSUP,
15189                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15190                                           NULL,
15191                                           "cannot create policy "
15192                                           "set tag action for this color");
15193                                 if (flow_dv_convert_action_set_tag
15194                                 (dev, mhdr_res,
15195                                 (const struct rte_flow_action_set_tag *)
15196                                 act->conf,  &flow_err))
15197                                         return -rte_mtr_error_set(error,
15198                                         ENOTSUP,
15199                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15200                                         NULL, "cannot convert policy "
15201                                         "set tag action");
15202                                 if (!mhdr_res->actions_num)
15203                                         return -rte_mtr_error_set(error,
15204                                         ENOTSUP,
15205                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15206                                         NULL, "cannot find policy "
15207                                         "set tag action");
15208                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15209                                 break;
15210                         case RTE_FLOW_ACTION_TYPE_DROP:
15211                         {
15212                                 struct mlx5_flow_mtr_mng *mtrmng =
15213                                                 priv->sh->mtrmng;
15214                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15215
15216                                 /*
15217                                  * Create the drop table with
15218                                  * METER DROP level.
15219                                  */
15220                                 if (!mtrmng->drop_tbl[domain]) {
15221                                         mtrmng->drop_tbl[domain] =
15222                                         flow_dv_tbl_resource_get(dev,
15223                                         MLX5_FLOW_TABLE_LEVEL_METER,
15224                                         egress, transfer, false, NULL, 0,
15225                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15226                                         if (!mtrmng->drop_tbl[domain])
15227                                                 return -rte_mtr_error_set
15228                                         (error, ENOTSUP,
15229                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15230                                         NULL,
15231                                         "Failed to create meter drop table");
15232                                 }
15233                                 tbl_data = container_of
15234                                 (mtrmng->drop_tbl[domain],
15235                                 struct mlx5_flow_tbl_data_entry, tbl);
15236                                 if (i < MLX5_MTR_RTE_COLORS) {
15237                                         act_cnt->dr_jump_action[domain] =
15238                                                 tbl_data->jump.action;
15239                                         act_cnt->fate_action =
15240                                                 MLX5_FLOW_FATE_DROP;
15241                                 }
15242                                 if (i == RTE_COLOR_RED)
15243                                         mtr_policy->dr_drop_action[domain] =
15244                                                 tbl_data->jump.action;
15245                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15246                                 break;
15247                         }
15248                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15249                         {
15250                                 if (i >= MLX5_MTR_RTE_COLORS)
15251                                         return -rte_mtr_error_set(error,
15252                                         ENOTSUP,
15253                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15254                                         NULL, "cannot create policy "
15255                                         "fate queue for this color");
15256                                 act_cnt->queue =
15257                                 ((const struct rte_flow_action_queue *)
15258                                         (act->conf))->index;
15259                                 act_cnt->fate_action =
15260                                         MLX5_FLOW_FATE_QUEUE;
15261                                 dev_flow.handle->fate_action =
15262                                         MLX5_FLOW_FATE_QUEUE;
15263                                 mtr_policy->is_queue = 1;
15264                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15265                                 break;
15266                         }
15267                         case RTE_FLOW_ACTION_TYPE_RSS:
15268                         {
15269                                 int rss_size;
15270
15271                                 if (i >= MLX5_MTR_RTE_COLORS)
15272                                         return -rte_mtr_error_set(error,
15273                                           ENOTSUP,
15274                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15275                                           NULL,
15276                                           "cannot create policy "
15277                                           "rss action for this color");
15278                                 /*
15279                                  * Save RSS conf into policy struct
15280                                  * for translate stage.
15281                                  */
15282                                 rss_size = (int)rte_flow_conv
15283                                         (RTE_FLOW_CONV_OP_ACTION,
15284                                         NULL, 0, act, &flow_err);
15285                                 if (rss_size <= 0)
15286                                         return -rte_mtr_error_set(error,
15287                                           ENOTSUP,
15288                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15289                                           NULL, "cannot get the RSS "
15290                                           "action struct size");
15291                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15292                                                 rss_size, 0, SOCKET_ID_ANY);
15293                                 if (!act_cnt->rss)
15294                                         return -rte_mtr_error_set(error,
15295                                           ENOMEM,
15296                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15297                                           NULL,
15298                                           "failed to allocate RSS action memory");
15299                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15300                                         act_cnt->rss, rss_size,
15301                                         act, &flow_err);
15302                                 if (ret < 0)
15303                                         return -rte_mtr_error_set(error,
15304                                           ENOTSUP,
15305                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15306                                           NULL, "failed to save "
15307                                           "RSS action into policy struct");
15308                                 act_cnt->fate_action =
15309                                         MLX5_FLOW_FATE_SHARED_RSS;
15310                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15311                                 break;
15312                         }
15313                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15314                         {
15315                                 struct mlx5_flow_dv_port_id_action_resource
15316                                         port_id_resource;
15317                                 uint32_t port_id = 0;
15318
15319                                 if (i >= MLX5_MTR_RTE_COLORS)
15320                                         return -rte_mtr_error_set(error,
15321                                         ENOTSUP,
15322                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15323                                         NULL, "cannot create policy "
15324                                         "port action for this color");
15325                                 memset(&port_id_resource, 0,
15326                                         sizeof(port_id_resource));
15327                                 if (flow_dv_translate_action_port_id(dev, act,
15328                                                 &port_id, &flow_err))
15329                                         return -rte_mtr_error_set(error,
15330                                         ENOTSUP,
15331                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15332                                         NULL, "cannot translate "
15333                                         "policy port action");
15334                                 port_id_resource.port_id = port_id;
15335                                 if (flow_dv_port_id_action_resource_register
15336                                         (dev, &port_id_resource,
15337                                         &dev_flow, &flow_err))
15338                                         return -rte_mtr_error_set(error,
15339                                         ENOTSUP,
15340                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15341                                         NULL, "cannot setup "
15342                                         "policy port action");
15343                                 act_cnt->rix_port_id_action =
15344                                         dev_flow.handle->rix_port_id_action;
15345                                 act_cnt->fate_action =
15346                                         MLX5_FLOW_FATE_PORT_ID;
15347                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15348                                 break;
15349                         }
15350                         case RTE_FLOW_ACTION_TYPE_JUMP:
15351                         {
15352                                 uint32_t jump_group = 0;
15353                                 uint32_t table = 0;
15354                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15355                                 struct flow_grp_info grp_info = {
15356                                         .external = !!dev_flow.external,
15357                                         .transfer = !!transfer,
15358                                         .fdb_def_rule = !!priv->fdb_def_rule,
15359                                         .std_tbl_fix = 0,
15360                                         .skip_scale = dev_flow.skip_scale &
15361                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15362                                 };
15363                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15364                                 mtr_policy->sub_policys[domain][0];
15365
15366                                 if (i >= MLX5_MTR_RTE_COLORS)
15367                                         return -rte_mtr_error_set(error,
15368                                           ENOTSUP,
15369                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15370                                           NULL,
15371                                           "cannot create policy "
15372                                           "jump action for this color");
15373                                 jump_group =
15374                                 ((const struct rte_flow_action_jump *)
15375                                                         act->conf)->group;
15376                                 if (mlx5_flow_group_to_table(dev, NULL,
15377                                                        jump_group,
15378                                                        &table,
15379                                                        &grp_info, &flow_err))
15380                                         return -rte_mtr_error_set(error,
15381                                         ENOTSUP,
15382                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15383                                         NULL, "cannot setup "
15384                                         "policy jump action");
15385                                 sub_policy->jump_tbl[i] =
15386                                 flow_dv_tbl_resource_get(dev,
15387                                         table, egress,
15388                                         transfer,
15389                                         !!dev_flow.external,
15390                                         NULL, jump_group, 0,
15391                                         0, &flow_err);
15392                                 if (!sub_policy->jump_tbl[i])
15393                                         return -rte_mtr_error_set(error,
15394                                                 ENOTSUP,
15395                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15396                                                 NULL,
15397                                                 "cannot create jump action.");
15398                                 tbl_data = container_of
15399                                 (sub_policy->jump_tbl[i],
15400                                 struct mlx5_flow_tbl_data_entry, tbl);
15401                                 act_cnt->dr_jump_action[domain] =
15402                                         tbl_data->jump.action;
15403                                 act_cnt->fate_action =
15404                                         MLX5_FLOW_FATE_JUMP;
15405                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15406                                 break;
15407                         }
15408                         case RTE_FLOW_ACTION_TYPE_METER:
15409                         {
15410                                 const struct rte_flow_action_meter *mtr;
15411                                 struct mlx5_flow_meter_info *next_fm;
15412                                 struct mlx5_flow_meter_policy *next_policy;
15413                                 struct rte_flow_action tag_action;
15414                                 struct mlx5_rte_flow_action_set_tag set_tag;
15415                                 uint32_t next_mtr_idx = 0;
15416
15417                                 mtr = act->conf;
15418                                 next_fm = mlx5_flow_meter_find(priv,
15419                                                         mtr->mtr_id,
15420                                                         &next_mtr_idx);
15421                                 if (!next_fm)
15422                                         return -rte_mtr_error_set(error, EINVAL,
15423                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15424                                                 "Failed to find next meter.");
15425                                 if (next_fm->def_policy)
15426                                         return -rte_mtr_error_set(error, EINVAL,
15427                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15428                                 "Hierarchy only supports termination meter.");
15429                                 next_policy = mlx5_flow_meter_policy_find(dev,
15430                                                 next_fm->policy_id, NULL);
15431                                 MLX5_ASSERT(next_policy);
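                                /*
                                 * If the next meter keeps a drop counter,
                                 * save its index in the meter register so
                                 * the next level's drop rules can match
                                 * these packets.
                                 */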
15432                                 if (next_fm->drop_cnt) {
15433                                         set_tag.id =
15434                                                 (enum modify_reg)
15435                                                 mlx5_flow_get_reg_id(dev,
15436                                                 MLX5_MTR_ID,
15437                                                 0,
15438                                                 (struct rte_flow_error *)error);
15439                                         set_tag.offset = (priv->mtr_reg_share ?
15440                                                 MLX5_MTR_COLOR_BITS : 0);
15441                                         set_tag.length = (priv->mtr_reg_share ?
15442                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15443                                                MLX5_REG_BITS);
15444                                         set_tag.data = next_mtr_idx;
15445                                         tag_action.type =
15446                                                 (enum rte_flow_action_type)
15447                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15448                                         tag_action.conf = &set_tag;
15449                                         if (flow_dv_convert_action_set_reg
15450                                                 (mhdr_res, &tag_action,
15451                                                 (struct rte_flow_error *)error))
15452                                                 return -rte_errno;
15453                                         action_flags |=
15454                                                 MLX5_FLOW_ACTION_SET_TAG;
15455                                 }
15456                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15457                                 act_cnt->next_mtr_id = next_fm->meter_id;
15458                                 act_cnt->next_sub_policy = NULL;
15459                                 mtr_policy->is_hierarchy = 1;
15460                                 mtr_policy->dev = next_policy->dev;
15461                                 action_flags |=
15462                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15463                                 break;
15464                         }
15465                         default:
15466                                 return -rte_mtr_error_set(error, ENOTSUP,
15467                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15468                                           NULL, "action type not supported");
15469                         }
15470                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15471                                 /* Create the modify header action if needed. */
15472                                 dev_flow.dv.group = 1;
15473                                 if (flow_dv_modify_hdr_resource_register
15474                                         (dev, mhdr_res, &dev_flow, &flow_err))
15475                                         return -rte_mtr_error_set(error,
15476                                                 ENOTSUP,
15477                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15478                                                 NULL, "cannot register policy "
15479                                                 "set tag action");
15480                                 act_cnt->modify_hdr =
15481                                         dev_flow.handle->dvh.modify_hdr;
15482                         }
15483                 }
15484         }
15485         return 0;
15486 }
15487
15488 /**
15489  * Create policy action per domain, lock free
15490  * (mutex should be acquired by caller).
15491  * Dispatcher for action type specific call.
15492  *
15493  * @param[in] dev
15494  *   Pointer to the Ethernet device structure.
15495  * @param[in] mtr_policy
15496  *   Meter policy struct.
15497  * @param[in] actions
15498  *   Per-color array of action specifications used to create meter actions.
15499  * @param[out] error
15500  *   Perform verbose error reporting if not NULL. Initialized in case of
15501  *   error only.
15502  *
15503  * @return
15504  *   0 on success, otherwise negative errno value.
15505  */
15506 static int
15507 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15508                       struct mlx5_flow_meter_policy *mtr_policy,
15509                       const struct rte_flow_action *actions[RTE_COLORS],
15510                       struct rte_mtr_error *error)
15511 {
15512         int ret, i;
15513         uint16_t sub_policy_num;
15514
15515         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
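                /*
                 * sub_policy_num packs one counter per meter domain,
                 * each MLX5_MTR_SUB_POLICY_NUM_SHIFT bits wide; shift
                 * and mask to extract the count for domain i.
                 */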
15516                 sub_policy_num = (mtr_policy->sub_policy_num >>
15517                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15518                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15519                 if (sub_policy_num) {
15520                         ret = __flow_dv_create_domain_policy_acts(dev,
15521                                 mtr_policy, actions,
15522                                 (enum mlx5_meter_domain)i, error);
15523                         if (ret)
15524                                 return ret;
15525                 }
15526         }
15527         return 0;
15528 }
15529
15530 /**
15531  * Query a DV flow rule for its statistics via DevX.
15532  *
15533  * @param[in] dev
15534  *   Pointer to Ethernet device.
15535  * @param[in] cnt_idx
15536  *   Index to the flow counter.
15537  * @param[out] data
15538  *   Data retrieved by the query.
15539  * @param[out] error
15540  *   Perform verbose error reporting if not NULL.
15541  *
15542  * @return
15543  *   0 on success, a negative errno value otherwise and rte_errno is set.
15544  */
15545 static int
15546 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15547                     struct rte_flow_error *error)
15548 {
15549         struct mlx5_priv *priv = dev->data->dev_private;
15550         struct rte_flow_query_count *qc = data;
15551
15552         if (!priv->config.devx)
15553                 return rte_flow_error_set(error, ENOTSUP,
15554                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15555                                           NULL,
15556                                           "counters are not supported");
15557         if (cnt_idx) {
15558                 uint64_t pkts, bytes;
15559                 struct mlx5_flow_counter *cnt;
15560                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15561
15562                 if (err)
15563                         return rte_flow_error_set(error, -err,
15564                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15565                                         NULL, "cannot read counters");
15566                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
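                /*
                 * Counters report deltas against the snapshot taken at
                 * the last reset; a reset request moves the snapshot to
                 * the current hardware values.
                 */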
15567                 qc->hits_set = 1;
15568                 qc->bytes_set = 1;
15569                 qc->hits = pkts - cnt->hits;
15570                 qc->bytes = bytes - cnt->bytes;
15571                 if (qc->reset) {
15572                         cnt->hits = pkts;
15573                         cnt->bytes = bytes;
15574                 }
15575                 return 0;
15576         }
15577         return rte_flow_error_set(error, EINVAL,
15578                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15579                                   NULL,
15580                                   "counters are not available");
15581 }
15582
15583 static int
15584 flow_dv_action_query(struct rte_eth_dev *dev,
15585                      const struct rte_flow_action_handle *handle, void *data,
15586                      struct rte_flow_error *error)
15587 {
15588         struct mlx5_age_param *age_param;
15589         struct rte_flow_query_age *resp;
15590         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15591         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15592         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15593         struct mlx5_priv *priv = dev->data->dev_private;
15594         struct mlx5_aso_ct_action *ct;
15595         uint16_t owner;
15596         uint32_t dev_idx;
15597
15598         switch (type) {
15599         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15600                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15601                 resp = data;
15602                 resp->aged = __atomic_load_n(&age_param->state,
15603                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15604                                                                           1 : 0;
15605                 resp->sec_since_last_hit_valid = !resp->aged;
15606                 if (resp->sec_since_last_hit_valid)
15607                         resp->sec_since_last_hit = __atomic_load_n
15608                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15609                 return 0;
15610         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15611                 return flow_dv_query_count(dev, idx, data, error);
15612         case MLX5_INDIRECT_ACTION_TYPE_CT:
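                /*
                 * The CT index encodes the owner port; only the owning
                 * port may query the conntrack object.
                 */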
15613                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15614                 if (owner != PORT_ID(priv))
15615                         return rte_flow_error_set(error, EACCES,
15616                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15617                                         NULL,
15618                                         "CT object owned by another port");
15619                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15620                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15621                 MLX5_ASSERT(ct);
15622                 if (!ct->refcnt)
15623                         return rte_flow_error_set(error, EFAULT,
15624                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15625                                         NULL,
15626                                         "CT object is inactive");
15627                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15628                                                         ct->peer;
15629                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15630                                                         ct->is_original;
15631                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15632                         return rte_flow_error_set(error, EIO,
15633                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15634                                         NULL,
15635                                         "Failed to query CT context");
15636                 return 0;
15637         default:
15638                 return rte_flow_error_set(error, ENOTSUP,
15639                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15640                                           "action type query not supported");
15641         }
15642 }
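
/*
 * A minimal application-side usage sketch (hypothetical "port_id" and
 * "handle"; the handle is assumed to reference an indirect AGE action):
 *
 *   struct rte_flow_query_age age = { 0 };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_action_handle_query(port_id, handle, &age, &err) == 0 &&
 *       age.sec_since_last_hit_valid)
 *           printf("idle for %u seconds\n", age.sec_since_last_hit);
 */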
15643
15644 /**
15645  * Query a flow rule AGE action for aging information.
15646  *
15647  * @param[in] dev
15648  *   Pointer to Ethernet device.
15649  * @param[in] flow
15650  *   Pointer to the sub flow.
15651  * @param[out] data
15652  *   Data retrieved by the query.
15653  * @param[out] error
15654  *   Perform verbose error reporting if not NULL.
15655  *
15656  * @return
15657  *   0 on success, a negative errno value otherwise and rte_errno is set.
15658  */
15659 static int
15660 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15661                   void *data, struct rte_flow_error *error)
15662 {
15663         struct rte_flow_query_age *resp = data;
15664         struct mlx5_age_param *age_param;
15665
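        /*
         * Aging data comes either from a dedicated ASO AGE action or,
         * for counter-based aging, from the counter's age parameters.
         */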
15666         if (flow->age) {
15667                 struct mlx5_aso_age_action *act =
15668                                      flow_aso_age_get_by_idx(dev, flow->age);
15669
15670                 age_param = &act->age_params;
15671         } else if (flow->counter) {
15672                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15673
15674                 if (!age_param || !age_param->timeout)
15675                         return rte_flow_error_set
15676                                         (error, EINVAL,
15677                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15678                                          NULL, "cannot read age data");
15679         } else {
15680                 return rte_flow_error_set(error, EINVAL,
15681                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15682                                           NULL, "age data not available");
15683         }
15684         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15685                                      AGE_TMOUT ? 1 : 0;
15686         resp->sec_since_last_hit_valid = !resp->aged;
15687         if (resp->sec_since_last_hit_valid)
15688                 resp->sec_since_last_hit = __atomic_load_n
15689                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15690         return 0;
15691 }
15692
15693 /**
15694  * Query a flow.
15695  *
15696  * @see rte_flow_query()
15697  * @see rte_flow_ops
15698  */
15699 static int
15700 flow_dv_query(struct rte_eth_dev *dev,
15701               struct rte_flow *flow,
15702               const struct rte_flow_action *actions,
15703               void *data,
15704               struct rte_flow_error *error)
15705 {
15706         int ret = -EINVAL;
15707
15708         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15709                 switch (actions->type) {
15710                 case RTE_FLOW_ACTION_TYPE_VOID:
15711                         break;
15712                 case RTE_FLOW_ACTION_TYPE_COUNT:
15713                         ret = flow_dv_query_count(dev, flow->counter, data,
15714                                                   error);
15715                         break;
15716                 case RTE_FLOW_ACTION_TYPE_AGE:
15717                         ret = flow_dv_query_age(dev, flow, data, error);
15718                         break;
15719                 default:
15720                         return rte_flow_error_set(error, ENOTSUP,
15721                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15722                                                   actions,
15723                                                   "action not supported");
15724                 }
15725         }
15726         return ret;
15727 }
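
/*
 * A minimal application-side usage sketch (hypothetical "port_id" and
 * "flow"; the flow is assumed to have been created with a COUNT action):
 *
 *   struct rte_flow_query_count qc = { .reset = 1 };
 *   const struct rte_flow_action count_actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_query(port_id, flow, count_actions, &qc, &err) == 0)
 *           printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                  qc.hits, qc.bytes);
 */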
15728
15729 /**
15730  * Destroy the meter table set.
15731  * Lock free (mutex should be acquired by caller).
15732  *
15733  * @param[in] dev
15734  *   Pointer to Ethernet device.
15735  * @param[in] fm
15736  *   Meter information table.
15737  */
15738 static void
15739 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15740                         struct mlx5_flow_meter_info *fm)
15741 {
15742         struct mlx5_priv *priv = dev->data->dev_private;
15743         int i;
15744
15745         if (!fm || !priv->config.dv_flow_en)
15746                 return;
15747         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15748                 if (fm->drop_rule[i]) {
15749                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15750                         fm->drop_rule[i] = NULL;
15751                 }
15752         }
15753 }
15754
15755 static void
15756 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15757 {
15758         struct mlx5_priv *priv = dev->data->dev_private;
15759         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15760         struct mlx5_flow_tbl_data_entry *tbl;
15761         int i, j;
15762
15763         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15764                 if (mtrmng->def_rule[i]) {
15765                         claim_zero(mlx5_flow_os_destroy_flow
15766                                         (mtrmng->def_rule[i]));
15767                         mtrmng->def_rule[i] = NULL;
15768                 }
15769                 if (mtrmng->def_matcher[i]) {
15770                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
15771                                 struct mlx5_flow_tbl_data_entry, tbl);
15772                         mlx5_list_unregister(tbl->matchers,
15773                                              &mtrmng->def_matcher[i]->entry);
15774                         mtrmng->def_matcher[i] = NULL;
15775                 }
15776                 for (j = 0; j < MLX5_REG_BITS; j++) {
15777                         if (mtrmng->drop_matcher[i][j]) {
15778                                 tbl =
15779                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
15780                                              struct mlx5_flow_tbl_data_entry,
15781                                              tbl);
15782                                 mlx5_list_unregister(tbl->matchers,
15783                                             &mtrmng->drop_matcher[i][j]->entry);
15784                                 mtrmng->drop_matcher[i][j] = NULL;
15785                         }
15786                 }
15787                 if (mtrmng->drop_tbl[i]) {
15788                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15789                                 mtrmng->drop_tbl[i]);
15790                         mtrmng->drop_tbl[i] = NULL;
15791                 }
15792         }
15793 }
15794
15795 /* Number of meter flow actions: count and jump, or count and drop. */
15796 #define METER_ACTIONS 2
15797
15798 static void
15799 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15800                               enum mlx5_meter_domain domain)
15801 {
15802         struct mlx5_priv *priv = dev->data->dev_private;
15803         struct mlx5_flow_meter_def_policy *def_policy =
15804                         priv->sh->mtrmng->def_policy[domain];
15805
15806         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15807         mlx5_free(def_policy);
15808         priv->sh->mtrmng->def_policy[domain] = NULL;
15809 }
15810
15811 /**
15812  * Destroy the default policy table set.
15813  *
15814  * @param[in] dev
15815  *   Pointer to Ethernet device.
15816  */
15817 static void
15818 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15819 {
15820         struct mlx5_priv *priv = dev->data->dev_private;
15821         int i;
15822
15823         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15824                 if (priv->sh->mtrmng->def_policy[i])
15825                         __flow_dv_destroy_domain_def_policy(dev,
15826                                         (enum mlx5_meter_domain)i);
15827         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15828 }
15829
15830 static int
15831 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
15832                         uint32_t color_reg_c_idx,
15833                         enum rte_color color, void *matcher_object,
15834                         int actions_n, void *actions,
15835                         bool match_src_port, const struct rte_flow_item *item,
15836                         void **rule, const struct rte_flow_attr *attr)
15837 {
15838         int ret;
15839         struct mlx5_flow_dv_match_params value = {
15840                 .size = sizeof(value.buf),
15841         };
15842         struct mlx5_flow_dv_match_params matcher = {
15843                 .size = sizeof(matcher.buf),
15844         };
15845         struct mlx5_priv *priv = dev->data->dev_private;
15846         uint8_t misc_mask;
15847
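        /*
         * When acting for a representor or the E-Switch manager,
         * additionally match on the originating port.
         */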
15848         if (match_src_port && (priv->representor || priv->master)) {
15849                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
15850                                                    value.buf, item, attr)) {
15851                         DRV_LOG(ERR,
15852                         "Failed to create meter policy flow with port.");
15853                         return -1;
15854                 }
15855         }
15856         flow_dv_match_meta_reg(matcher.buf, value.buf,
15857                                 (enum modify_reg)color_reg_c_idx,
15858                                 rte_col_2_mlx5_col(color),
15859                                 UINT32_MAX);
15860         misc_mask = flow_dv_matcher_enable(value.buf);
15861         __flow_dv_adjust_buf_size(&value.size, misc_mask);
15862         ret = mlx5_flow_os_create_flow(matcher_object,
15863                         (void *)&value, actions_n, actions, rule);
15864         if (ret) {
15865                 DRV_LOG(ERR, "Failed to create meter policy flow.");
15866                 return -1;
15867         }
15868         return 0;
15869 }
15870
15871 static int
15872 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
15873                         uint32_t color_reg_c_idx,
15874                         uint16_t priority,
15875                         struct mlx5_flow_meter_sub_policy *sub_policy,
15876                         const struct rte_flow_attr *attr,
15877                         bool match_src_port,
15878                         const struct rte_flow_item *item,
15879                         struct mlx5_flow_dv_matcher **policy_matcher,
15880                         struct rte_flow_error *error)
15881 {
15882         struct mlx5_list_entry *entry;
15883         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
15884         struct mlx5_flow_dv_matcher matcher = {
15885                 .mask = {
15886                         .size = sizeof(matcher.mask.buf),
15887                 },
15888                 .tbl = tbl_rsc,
15889         };
15890         struct mlx5_flow_dv_match_params value = {
15891                 .size = sizeof(value.buf),
15892         };
15893         struct mlx5_flow_cb_ctx ctx = {
15894                 .error = error,
15895                 .data = &matcher,
15896         };
15897         struct mlx5_flow_tbl_data_entry *tbl_data;
15898         struct mlx5_priv *priv = dev->data->dev_private;
15899         uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
15900
15901         if (match_src_port && (priv->representor || priv->master)) {
15902                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
15903                                                    value.buf, item, attr)) {
15904                         DRV_LOG(ERR,
15905                         "Failed to register meter drop matcher with port.");
15906                         return -1;
15907                 }
15908         }
15909         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
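        /*
         * Green and yellow matchers match on the color register;
         * red, matched last, has no color match and acts as the
         * default rule.
         */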
15910         if (priority < RTE_COLOR_RED)
15911                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15912                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
15913         matcher.priority = priority;
15914         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
15915                                         matcher.mask.size);
15916         entry = mlx5_list_register(tbl_data->matchers, &ctx);
15917         if (!entry) {
15918                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
15919                 return -1;
15920         }
15921         *policy_matcher =
15922                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
15923         return 0;
15924 }
15925
15926 /**
15927  * Create the policy rules per domain.
15928  *
15929  * @param[in] dev
15930  *   Pointer to Ethernet device.
15931  * @param[in] sub_policy
15932  *   Pointer to sub policy table.
15933  * @param[in] egress
15934  *   Direction of the table.
15935  * @param[in] transfer
15936  *   E-Switch or NIC flow.
 * @param[in] match_src_port
 *   Whether to match on the source port.
15937  * @param[in] acts
15938  *   Pointer to policy action list per color.
15939  *
15940  * @return
15941  *   0 on success, -1 otherwise.
15942  */
15943 static int
15944 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
15945                 struct mlx5_flow_meter_sub_policy *sub_policy,
15946                 uint8_t egress, uint8_t transfer, bool match_src_port,
15947                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
15948 {
15949         struct mlx5_priv *priv = dev->data->dev_private;
15950         struct rte_flow_error flow_err;
15951         uint32_t color_reg_c_idx;
15952         struct rte_flow_attr attr = {
15953                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
15954                 .priority = 0,
15955                 .ingress = 0,
15956                 .egress = !!egress,
15957                 .transfer = !!transfer,
15958                 .reserved = 0,
15959         };
15960         int i;
15961         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
15962         struct mlx5_sub_policy_color_rule *color_rule = NULL;
15963
15964         if (ret < 0)
15965                 return -1;
15966         /* Create policy table with POLICY level. */
15967         if (!sub_policy->tbl_rsc)
15968                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
15969                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
15970                                 egress, transfer, false, NULL, 0, 0,
15971                                 sub_policy->idx, &flow_err);
15972         if (!sub_policy->tbl_rsc) {
15973                 DRV_LOG(ERR,
15974                         "Failed to create meter sub policy table.");
15975                 return -1;
15976         }
15977         /* Prepare matchers. */
15978         color_reg_c_idx = ret;
15979         for (i = 0; i < RTE_COLORS; i++) {
15980                 TAILQ_INIT(&sub_policy->color_rules[i]);
15981                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
15982                         continue;
15983                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
15984                                 sizeof(struct mlx5_sub_policy_color_rule),
15985                                 0, SOCKET_ID_ANY);
15986                 if (!color_rule) {
15987                         DRV_LOG(ERR, "No memory to create color rule.");
15988                         goto err_exit;
15989                 }
15990                 color_rule->src_port = priv->representor_id;
15991                 attr.priority = i;
15992                 /* Create matchers for Color. */
15993                 if (__flow_dv_create_policy_matcher(dev,
15994                                 color_reg_c_idx, i, sub_policy, &attr,
15995                                 (i != RTE_COLOR_RED ? match_src_port : false),
15996                                 NULL, &color_rule->matcher, &flow_err)) {
15997                         DRV_LOG(ERR, "Failed to create color matcher.");
15998                         goto err_exit;
15999                 }
16000                 /* Create flow, matching color. */
16001                 if (__flow_dv_create_policy_flow(dev,
16002                                 color_reg_c_idx, (enum rte_color)i,
16003                                 color_rule->matcher->matcher_object,
16004                                 acts[i].actions_n,
16005                                 acts[i].dv_actions,
16006                                 (i != RTE_COLOR_RED ? match_src_port : false),
16007                                 NULL, &color_rule->rule,
16008                                 &attr)) {
16009                         DRV_LOG(ERR, "Failed to create color rule.");
16010                         goto err_exit;
16011                 }
16012                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16013                                   color_rule, next_port);
16014         }
16015         return 0;
16016 err_exit:
16017         if (color_rule) {
16018                 if (color_rule->rule)
16019                         mlx5_flow_os_destroy_flow(color_rule->rule);
16020                 if (color_rule->matcher) {
16021                         struct mlx5_flow_tbl_data_entry *tbl =
16022                                 container_of(color_rule->matcher->tbl,
16023                                                 typeof(*tbl), tbl);
16024                         mlx5_list_unregister(tbl->matchers,
16025                                                 &color_rule->matcher->entry);
16026                 }
16027                 mlx5_free(color_rule);
16028         }
16029         return -1;
16030 }
16031
16032 static int
16033 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16034                         struct mlx5_flow_meter_policy *mtr_policy,
16035                         struct mlx5_flow_meter_sub_policy *sub_policy,
16036                         uint32_t domain)
16037 {
16038         struct mlx5_priv *priv = dev->data->dev_private;
16039         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16040         struct mlx5_flow_dv_tag_resource *tag;
16041         struct mlx5_flow_dv_port_id_action_resource *port_action;
16042         struct mlx5_hrxq *hrxq;
16043         struct mlx5_flow_meter_info *next_fm = NULL;
16044         struct mlx5_flow_meter_policy *next_policy;
16045         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16046         struct mlx5_flow_tbl_data_entry *tbl_data;
16047         struct rte_flow_error error;
16048         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16049         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16050         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16051         bool match_src_port = false;
16052         int i;
16053
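        /*
         * Assemble the per-color DV action lists from the saved
         * policy actions; red only carries the pre-created drop
         * action.
         */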
16054         for (i = 0; i < RTE_COLORS; i++) {
16055                 acts[i].actions_n = 0;
16056                 if (i == RTE_COLOR_YELLOW)
16057                         continue;
16058                 if (i == RTE_COLOR_RED) {
16059                         /* Only support drop on red. */
16060                         acts[i].dv_actions[0] =
16061                         mtr_policy->dr_drop_action[domain];
16062                         acts[i].actions_n = 1;
16063                         continue;
16064                 }
16065                 if (mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16066                         struct rte_flow_attr attr = {
16067                                 .transfer = transfer
16068                         };
16069
16070                         next_fm = mlx5_flow_meter_find(priv,
16071                                         mtr_policy->act_cnt[i].next_mtr_id,
16072                                         NULL);
16073                         if (!next_fm) {
16074                                 DRV_LOG(ERR,
16075                                         "Failed to get next hierarchy meter.");
16076                                 goto err_exit;
16077                         }
16078                         if (mlx5_flow_meter_attach(priv, next_fm,
16079                                                    &attr, &error)) {
16080                                 DRV_LOG(ERR, "%s", error.message);
16081                                 next_fm = NULL;
16082                                 goto err_exit;
16083                         }
16084                         /* Meter action must be the first for TX. */
16085                         if (mtr_first) {
16086                                 acts[i].dv_actions[acts[i].actions_n] =
16087                                         next_fm->meter_action;
16088                                 acts[i].actions_n++;
16089                         }
16090                 }
16091                 if (mtr_policy->act_cnt[i].rix_mark) {
16092                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16093                                         mtr_policy->act_cnt[i].rix_mark);
16094                         if (!tag) {
16095                                 DRV_LOG(ERR, "Failed to find "
16096                                 "mark action for policy.");
16097                                 goto err_exit;
16098                         }
16099                         acts[i].dv_actions[acts[i].actions_n] =
16100                                                 tag->action;
16101                         acts[i].actions_n++;
16102                 }
16103                 if (mtr_policy->act_cnt[i].modify_hdr) {
16104                         acts[i].dv_actions[acts[i].actions_n] =
16105                         mtr_policy->act_cnt[i].modify_hdr->action;
16106                         acts[i].actions_n++;
16107                 }
16108                 if (mtr_policy->act_cnt[i].fate_action) {
16109                         switch (mtr_policy->act_cnt[i].fate_action) {
16110                         case MLX5_FLOW_FATE_PORT_ID:
16111                                 port_action = mlx5_ipool_get
16112                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16113                                 mtr_policy->act_cnt[i].rix_port_id_action);
16114                                 if (!port_action) {
16115                                         DRV_LOG(ERR, "Failed to find "
16116                                                 "port action for policy.");
16117                                         goto err_exit;
16118                                 }
16119                                 acts[i].dv_actions[acts[i].actions_n] =
16120                                 port_action->action;
16121                                 acts[i].actions_n++;
16122                                 mtr_policy->dev = dev;
16123                                 match_src_port = true;
16124                                 break;
16125                         case MLX5_FLOW_FATE_DROP:
16126                         case MLX5_FLOW_FATE_JUMP:
16127                                 acts[i].dv_actions[acts[i].actions_n] =
16128                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
16129                                 acts[i].actions_n++;
16130                                 break;
16131                         case MLX5_FLOW_FATE_SHARED_RSS:
16132                         case MLX5_FLOW_FATE_QUEUE:
16133                                 hrxq = mlx5_ipool_get
16134                                 (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16135                                 sub_policy->rix_hrxq[i]);
16136                                 if (!hrxq) {
16137                                         DRV_LOG(ERR, "Failed to find "
16138                                                 "queue action for policy.");
16139                                         goto err_exit;
16140                                 }
16141                                 acts[i].dv_actions[acts[i].actions_n] =
16142                                 hrxq->action;
16143                                 acts[i].actions_n++;
16144                                 break;
16145                         case MLX5_FLOW_FATE_MTR:
16146                                 if (!next_fm) {
16147                                         DRV_LOG(ERR,
16148                                                 "No next hierarchy meter.");
16149                                         goto err_exit;
16150                                 }
16151                                 if (!mtr_first) {
16152                                         acts[i].dv_actions[acts[i].actions_n] =
16153                                                         next_fm->meter_action;
16154                                         acts[i].actions_n++;
16155                                 }
16156                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
16157                                         next_sub_policy =
16158                                         mtr_policy->act_cnt[i].next_sub_policy;
16159                                 } else {
16160                                         next_policy =
16161                                                 mlx5_flow_meter_policy_find(dev,
16162                                                 next_fm->policy_id, NULL);
16163                                         MLX5_ASSERT(next_policy);
16164                                         next_sub_policy =
16165                                         next_policy->sub_policys[domain][0];
16166                                 }
16167                                 tbl_data =
16168                                         container_of(next_sub_policy->tbl_rsc,
16169                                         struct mlx5_flow_tbl_data_entry, tbl);
16170                                 acts[i].dv_actions[acts[i].actions_n++] =
16171                                                         tbl_data->jump.action;
16172                                 if (mtr_policy->act_cnt[i].modify_hdr)
16173                                         match_src_port = !!transfer;
16174                                 break;
16175                         default:
16176                                 /* Other fate actions: nothing to do. */
16177                                 break;
16178                         }
16179                 }
16180         }
16181         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16182                                 egress, transfer, match_src_port, acts)) {
16183                 DRV_LOG(ERR,
16184                 "Failed to create policy rules per domain.");
16185                 goto err_exit;
16186         }
16187         return 0;
16188 err_exit:
16189         if (next_fm)
16190                 mlx5_flow_meter_detach(priv, next_fm);
16191         return -1;
16192 }
16193
16194 /**
16195  * Create the policy rules.
16196  *
16197  * @param[in] dev
16198  *   Pointer to Ethernet device.
16199  * @param[in,out] mtr_policy
16200  *   Pointer to meter policy table.
16201  *
16202  * @return
16203  *   0 on success, -1 otherwise.
16204  */
16205 static int
16206 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16207                              struct mlx5_flow_meter_policy *mtr_policy)
16208 {
16209         int i;
16210         uint16_t sub_policy_num;
16211
16212         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16213                 sub_policy_num = (mtr_policy->sub_policy_num >>
16214                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16215                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16216                 if (!sub_policy_num)
16217                         continue;
16218                 /* Prepare actions list and create policy rules. */
16219                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16220                         mtr_policy->sub_policys[i][0], i)) {
16221                         DRV_LOG(ERR,
16222                         "Failed to create policy action list per domain.");
16223                         return -1;
16224                 }
16225         }
16226         return 0;
16227 }
16228
16229 static int
16230 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16231 {
16232         struct mlx5_priv *priv = dev->data->dev_private;
16233         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16234         struct mlx5_flow_meter_def_policy *def_policy;
16235         struct mlx5_flow_tbl_resource *jump_tbl;
16236         struct mlx5_flow_tbl_data_entry *tbl_data;
16237         uint8_t egress, transfer;
16238         struct rte_flow_error error;
16239         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16240         int ret;
16241
16242         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16243         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16244         def_policy = mtrmng->def_policy[domain];
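        /*
         * The default policy jumps green packets to the meter suffix
         * table and red packets to the meter drop table.
         */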
16245         if (!def_policy) {
16246                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16247                         sizeof(struct mlx5_flow_meter_def_policy),
16248                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16249                 if (!def_policy) {
16250                         DRV_LOG(ERR, "Failed to alloc "
16251                                         "default policy table.");
16252                         goto def_policy_error;
16253                 }
16254                 mtrmng->def_policy[domain] = def_policy;
16255                 /* Create the meter suffix table with SUFFIX level. */
16256                 jump_tbl = flow_dv_tbl_resource_get(dev,
16257                                 MLX5_FLOW_TABLE_LEVEL_METER,
16258                                 egress, transfer, false, NULL, 0,
16259                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16260                 if (!jump_tbl) {
16261                         DRV_LOG(ERR,
16262                                 "Failed to create meter suffix table.");
16263                         goto def_policy_error;
16264                 }
16265                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16266                 tbl_data = container_of(jump_tbl,
16267                                 struct mlx5_flow_tbl_data_entry, tbl);
16268                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16269                                                 tbl_data->jump.action;
16270                 acts[RTE_COLOR_GREEN].dv_actions[0] =
16271                                                 tbl_data->jump.action;
16272                 acts[RTE_COLOR_GREEN].actions_n = 1;
16273                 /* Create jump action to the drop table. */
16274                 if (!mtrmng->drop_tbl[domain]) {
16275                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16276                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16277                                 egress, transfer, false, NULL, 0,
16278                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
16279                         if (!mtrmng->drop_tbl[domain]) {
16280                                 DRV_LOG(ERR, "Failed to create "
16281                                 "meter drop table for default policy.");
16282                                 goto def_policy_error;
16283                         }
16284                 }
16285                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16286                                 struct mlx5_flow_tbl_data_entry, tbl);
16287                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16288                                                 tbl_data->jump.action;
16289                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16290                 acts[RTE_COLOR_RED].actions_n = 1;
16291                 /* Create default policy rules. */
16292                 ret = __flow_dv_create_domain_policy_rules(dev,
16293                                         &def_policy->sub_policy,
16294                                         egress, transfer, false, acts);
16295                 if (ret) {
16296                         DRV_LOG(ERR, "Failed to create "
16297                                 "default policy rules.");
16298                         goto def_policy_error;
16299                 }
16300         }
16301         return 0;
16302 def_policy_error:
16303         __flow_dv_destroy_domain_def_policy(dev,
16304                         (enum mlx5_meter_domain)domain);
16305         return -1;
16306 }
16307
16308 /**
16309  * Create the default policy table set.
16310  *
16311  * @param[in] dev
16312  *   Pointer to Ethernet device.
16313  * @return
16314  *   0 on success, -1 otherwise.
16315  */
16316 static int
16317 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16318 {
16319         struct mlx5_priv *priv = dev->data->dev_private;
16320         int i;
16321
16322         /* Non-termination policy table. */
16323         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16324                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16325                         continue;
16326                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16327                         DRV_LOG(ERR,
16328                         "Failed to create default policy");
16329                         return -1;
16330                 }
16331         }
16332         return 0;
16333 }
16334
16335 /**
16336  * Create the needed meter tables.
16337  * Lock-free (the mutex should be acquired by the caller).
16338  *
16339  * @param[in] dev
16340  *   Pointer to Ethernet device.
16341  * @param[in] fm
16342  *   Meter information table.
16343  * @param[in] mtr_idx
16344  *   Meter index.
16345  * @param[in] domain_bitmap
16346  *   Domain bitmap.
16347  * @return
16348  *   0 on success, -1 otherwise.
16349  */
16350 static int
16351 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16352                         struct mlx5_flow_meter_info *fm,
16353                         uint32_t mtr_idx,
16354                         uint8_t domain_bitmap)
16355 {
16356         struct mlx5_priv *priv = dev->data->dev_private;
16357         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16358         struct rte_flow_error error;
16359         struct mlx5_flow_tbl_data_entry *tbl_data;
16360         uint8_t egress, transfer;
16361         void *actions[METER_ACTIONS];
16362         int domain, ret, i;
16363         struct mlx5_flow_counter *cnt;
16364         struct mlx5_flow_dv_match_params value = {
16365                 .size = sizeof(value.buf),
16366         };
16367         struct mlx5_flow_dv_match_params matcher_para = {
16368                 .size = sizeof(matcher_para.buf),
16369         };
16370         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16371                                                      0, &error);
16372         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16373         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16374         struct mlx5_list_entry *entry;
16375         struct mlx5_flow_dv_matcher matcher = {
16376                 .mask = {
16377                         .size = sizeof(matcher.mask.buf),
16378                 },
16379         };
16380         struct mlx5_flow_dv_matcher *drop_matcher;
16381         struct mlx5_flow_cb_ctx ctx = {
16382                 .error = &error,
16383                 .data = &matcher,
16384         };
16385         uint8_t misc_mask;
16386
16387         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16388                 rte_errno = ENOTSUP;
16389                 return -1;
16390         }
16391         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16392                 if (!(domain_bitmap & (1 << domain)) ||
16393                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16394                         continue;
16395                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16396                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16397                 /* Create the drop table with METER DROP level. */
16398                 if (!mtrmng->drop_tbl[domain]) {
16399                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16400                                         MLX5_FLOW_TABLE_LEVEL_METER,
16401                                         egress, transfer, false, NULL, 0,
16402                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16403                         if (!mtrmng->drop_tbl[domain]) {
16404                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16405                                 goto policy_error;
16406                         }
16407                 }
16408                 /* Create default matcher in drop table. */
16409                 matcher.tbl = mtrmng->drop_tbl[domain];
16410                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16411                                 struct mlx5_flow_tbl_data_entry, tbl);
16412                 if (!mtrmng->def_matcher[domain]) {
16413                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16414                                        (enum modify_reg)mtr_id_reg_c,
16415                                        0, 0);
16416                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16417                         matcher.crc = rte_raw_cksum
16418                                         ((const void *)matcher.mask.buf,
16419                                         matcher.mask.size);
16420                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16421                         if (!entry) {
16422                                 DRV_LOG(ERR, "Failed to register meter "
16423                                 "drop default matcher.");
16424                                 goto policy_error;
16425                         }
16426                         mtrmng->def_matcher[domain] = container_of(entry,
16427                         struct mlx5_flow_dv_matcher, entry);
16428                 }
16429                 /* Create default rule in drop table. */
16430                 if (!mtrmng->def_rule[domain]) {
16431                         i = 0;
16432                         actions[i++] = priv->sh->dr_drop_action;
16433                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16434                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16435                         misc_mask = flow_dv_matcher_enable(value.buf);
16436                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16437                         ret = mlx5_flow_os_create_flow
16438                                 (mtrmng->def_matcher[domain]->matcher_object,
16439                                 (void *)&value, i, actions,
16440                                 &mtrmng->def_rule[domain]);
16441                         if (ret) {
16442                                 DRV_LOG(ERR, "Failed to create meter "
16443                                 "default drop rule for drop table.");
16444                                 goto policy_error;
16445                         }
16446                 }
16447                 if (!fm->drop_cnt)
16448                         continue;
16449                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16450                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16451                         /* Create matchers for Drop. */
16452                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16453                                         (enum modify_reg)mtr_id_reg_c, 0,
16454                                         (mtr_id_mask << mtr_id_offset));
16455                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16456                         matcher.crc = rte_raw_cksum
16457                                         ((const void *)matcher.mask.buf,
16458                                         matcher.mask.size);
16459                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16460                         if (!entry) {
16461                                 DRV_LOG(ERR,
16462                                 "Failed to register meter drop matcher.");
16463                                 goto policy_error;
16464                         }
16465                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16466                                 container_of(entry, struct mlx5_flow_dv_matcher,
16467                                              entry);
16468                 }
16469                 drop_matcher =
16470                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16471                 /* Create drop rule, matching meter_id only. */
16472                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16473                                 (enum modify_reg)mtr_id_reg_c,
16474                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16475                 i = 0;
16476                 cnt = flow_dv_counter_get_by_idx(dev,
16477                                         fm->drop_cnt, NULL);
16478                 actions[i++] = cnt->action;
16479                 actions[i++] = priv->sh->dr_drop_action;
16480                 misc_mask = flow_dv_matcher_enable(value.buf);
16481                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16482                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16483                                                (void *)&value, i, actions,
16484                                                &fm->drop_rule[domain]);
16485                 if (ret) {
16486                         DRV_LOG(ERR, "Failed to create meter "
16487                                 "drop rule for drop table.");
16488                         goto policy_error;
16489                 }
16490         }
16491         return 0;
16492 policy_error:
16493         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16494                 if (fm->drop_rule[i]) {
16495                         claim_zero(mlx5_flow_os_destroy_flow
16496                                 (fm->drop_rule[i]));
16497                         fm->drop_rule[i] = NULL;
16498                 }
16499         }
16500         return -1;
16501 }
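
/*
 * Layout of the meter ID register matched above (a sketch, assuming
 * priv->mtr_reg_share is set): the low MLX5_MTR_COLOR_BITS of the
 * REG_C register carry the packet color and the meter ID sits above
 * them, which is why the drop matcher masks
 * mtr_id_mask << mtr_id_offset and each drop rule matches
 * mtr_idx << mtr_id_offset:
 *
 *   uint8_t off = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
 *   uint32_t mask = ((UINT32_C(1) << mtrmng->max_mtr_bits) - 1) << off;
 *   uint32_t value = (uint32_t)mtr_idx << off;
 *
 * When the register is not shared, off is 0 and the whole register
 * holds the meter ID.
 */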
16502
16503 static struct mlx5_flow_meter_sub_policy *
16504 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16505                 struct mlx5_flow_meter_policy *mtr_policy,
16506                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16507                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16508                 bool *is_reuse)
16509 {
16510         struct mlx5_priv *priv = dev->data->dev_private;
16511         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16512         uint32_t sub_policy_idx = 0;
16513         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16514         uint32_t i, j;
16515         struct mlx5_hrxq *hrxq;
16516         struct mlx5_flow_handle dh;
16517         struct mlx5_meter_policy_action_container *act_cnt;
16518         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16519         uint16_t sub_policy_num;
16520
16521         rte_spinlock_lock(&mtr_policy->sl);
16522         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16523                 if (!rss_desc[i])
16524                         continue;
16525                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16526                 if (!hrxq_idx[i]) {
16527                         rte_spinlock_unlock(&mtr_policy->sl);
16528                         return NULL;
16529                 }
16530         }
16531         sub_policy_num = (mtr_policy->sub_policy_num >>
16532                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16533                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16534         for (i = 0; i < sub_policy_num; i++) {
16536                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
16537                         if (rss_desc[j] &&
16538                                 hrxq_idx[j] !=
16539                         mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
16540                                 break;
16541                 }
16542                 if (j >= MLX5_MTR_RTE_COLORS) {
16543                         /*
16544                          * Found the sub policy table with
16545                          * the same queue per color
16546                          */
16547                         rte_spinlock_unlock(&mtr_policy->sl);
16548                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
16549                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
16550                         *is_reuse = true;
16551                         return mtr_policy->sub_policys[domain][i];
16552                 }
16553         }
16554         /* Create sub policy. */
16555         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16556                 /* Reuse the first dummy sub_policy. */
16557                 sub_policy = mtr_policy->sub_policys[domain][0];
16558                 sub_policy_idx = sub_policy->idx;
16559         } else {
16560                 sub_policy = mlx5_ipool_zmalloc
16561                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16562                                 &sub_policy_idx);
16563                 if (!sub_policy ||
16564                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16565                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16566                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16567                         goto rss_sub_policy_error;
16568                 }
16569                 sub_policy->idx = sub_policy_idx;
16570                 sub_policy->main_policy = mtr_policy;
16571         }
16572         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16573                 if (!rss_desc[i])
16574                         continue;
16575                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16576                 if (mtr_policy->is_hierarchy) {
16577                         act_cnt = &mtr_policy->act_cnt[i];
16578                         act_cnt->next_sub_policy = next_sub_policy;
16579                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16580                 } else {
16581                         /*
16582                          * Overwrite the last action from
16583                          * RSS action to Queue action.
16584                          */
16585                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16586                                 hrxq_idx[i]);
16587                         if (!hrxq) {
16588                                 DRV_LOG(ERR, "Failed to find policy hrxq.");
16589                                 goto rss_sub_policy_error;
16590                         }
16591                         act_cnt = &mtr_policy->act_cnt[i];
16592                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16593                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16594                                 if (act_cnt->rix_mark)
16595                                         dh.mark = 1;
16596                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16597                                 dh.rix_hrxq = hrxq_idx[i];
16598                                 flow_drv_rxq_flags_set(dev, &dh);
16599                         }
16600                 }
16601         }
16602         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16603                 sub_policy, domain)) {
16604                 DRV_LOG(ERR, "Failed to create policy "
16605                         "rules per domain.");
16606                 goto rss_sub_policy_error;
16607         }
16608         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16609                 i = (mtr_policy->sub_policy_num >>
16610                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16611                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16612                 mtr_policy->sub_policys[domain][i] = sub_policy;
16613                 i++;
16614                 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
16615                         goto rss_sub_policy_error;
16616                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16617                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16618                 mtr_policy->sub_policy_num |=
16619                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16620                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16621         }
16622         rte_spinlock_unlock(&mtr_policy->sl);
16623         *is_reuse = false;
16624         return sub_policy;
16625 rss_sub_policy_error:
16626         if (sub_policy) {
16627                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16628                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16629                         i = (mtr_policy->sub_policy_num >>
16630                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16631                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16632                         mtr_policy->sub_policys[domain][i] = NULL;
16633                         mlx5_ipool_free
16634                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16635                                         sub_policy->idx);
16636                 }
16637         }
16638         rte_spinlock_unlock(&mtr_policy->sl);
16639         return NULL;
16640 }
16641
16642 /**
16643  * Find the policy table for prefix table with RSS.
16644  *
16645  * @param[in] dev
16646  *   Pointer to Ethernet device.
16647  * @param[in] mtr_policy
16648  *   Pointer to meter policy table.
16649  * @param[in] rss_desc
16650  *   Pointer to rss_desc
16651  * @return
16652  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
16653  */
16654 static struct mlx5_flow_meter_sub_policy *
16655 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
16656                 struct mlx5_flow_meter_policy *mtr_policy,
16657                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
16658 {
16659         struct mlx5_priv *priv = dev->data->dev_private;
16660         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16661         struct mlx5_flow_meter_info *next_fm;
16662         struct mlx5_flow_meter_policy *next_policy;
16663         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
16664         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
16665         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
16666         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16667         bool reuse_sub_policy;
16668         uint32_t i = 0;
16669         uint32_t j = 0;
16670
16671         while (true) {
16672                 /* Iterate hierarchy to get all policies in this hierarchy. */
16673                 policies[i++] = mtr_policy;
16674                 if (!mtr_policy->is_hierarchy)
16675                         break;
16676                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
16677                         DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
16678                         return NULL;
16679                 }
16680                 next_fm = mlx5_flow_meter_find(priv,
16681                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16682                 if (!next_fm) {
16683                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
16684                         return NULL;
16685                 }
16686                 next_policy =
16687                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
16688                                                     NULL);
16689                 MLX5_ASSERT(next_policy);
16690                 mtr_policy = next_policy;
16691         }
16692         while (i) {
16693                 /*
16694                  * From last policy to the first one in hierarchy,
16695                  * create/get the sub policy for each of them.
16696                  */
16697                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
16698                                                         policies[--i],
16699                                                         rss_desc,
16700                                                         next_sub_policy,
16701                                                         &reuse_sub_policy);
16702                 if (!sub_policy) {
16703                         DRV_LOG(ERR, "Failed to get the sub policy.");
16704                         goto err_exit;
16705                 }
16706                 if (!reuse_sub_policy)
16707                         sub_policies[j++] = sub_policy;
16708                 next_sub_policy = sub_policy;
16709         }
16710         return sub_policy;
16711 err_exit:
16712         while (j) {
16713                 uint16_t sub_policy_num;
16714
16715                 sub_policy = sub_policies[--j];
16716                 mtr_policy = sub_policy->main_policy;
16717                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16718                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16719                         sub_policy_num = (mtr_policy->sub_policy_num >>
16720                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16721                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
16722                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
16723                                                                         NULL;
16724                         sub_policy_num--;
16725                         mtr_policy->sub_policy_num &=
16726                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16727                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16728                         mtr_policy->sub_policy_num |=
16729                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16730                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16731                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16732                                         sub_policy->idx);
16733                 }
16734         }
16735         return NULL;
16736 }
16737
16738 /**
16739  * Create the sub policy tag rule for all meters in hierarchy.
16740  *
16741  * @param[in] dev
16742  *   Pointer to Ethernet device.
16743  * @param[in] fm
16744  *   Meter information table.
16745  * @param[in] src_port
16746  *   The src port this extra rule should use.
16747  * @param[in] item
16748  *   The src port match item.
16749  * @param[out] error
16750  *   Perform verbose error reporting if not NULL.
16751  * @return
16752  *   0 on success, a negative errno value otherwise and rte_errno is set.
16753  */
16754 static int
16755 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
16756                                 struct mlx5_flow_meter_info *fm,
16757                                 int32_t src_port,
16758                                 const struct rte_flow_item *item,
16759                                 struct rte_flow_error *error)
16760 {
16761         struct mlx5_priv *priv = dev->data->dev_private;
16762         struct mlx5_flow_meter_policy *mtr_policy;
16763         struct mlx5_flow_meter_sub_policy *sub_policy;
16764         struct mlx5_flow_meter_info *next_fm = NULL;
16765         struct mlx5_flow_meter_policy *next_policy;
16766         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16767         struct mlx5_flow_tbl_data_entry *tbl_data;
16768         struct mlx5_sub_policy_color_rule *color_rule;
16769         struct mlx5_meter_policy_acts acts;
16770         uint32_t color_reg_c_idx;
16771         bool mtr_first = (src_port != UINT16_MAX);
16772         struct rte_flow_attr attr = {
16773                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16774                 .priority = 0,
16775                 .ingress = 0,
16776                 .egress = 0,
16777                 .transfer = 1,
16778                 .reserved = 0,
16779         };
16780         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
16781         int i;
16782
16783         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
16784         MLX5_ASSERT(mtr_policy);
16785         if (!mtr_policy->is_hierarchy)
16786                 return 0;
16787         next_fm = mlx5_flow_meter_find(priv,
16788                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16789         if (!next_fm) {
16790                 return rte_flow_error_set(error, EINVAL,
16791                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
16792                                 "Failed to find next meter in hierarchy.");
16793         }
16794         if (!next_fm->drop_cnt)
16795                 goto exit;
16796         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
16797         sub_policy = mtr_policy->sub_policys[domain][0];
16798         for (i = 0; i < RTE_COLORS; i++) {
16799                 bool rule_exist = false;
16800                 struct mlx5_meter_policy_action_container *act_cnt;
16801
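                /*
                 * Only the GREEN color of a hierarchy policy chains a
                 * next meter, so tag rules are created for GREEN only.
                 */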
16802                 if (i >= RTE_COLOR_YELLOW)
16803                         break;
16804                 TAILQ_FOREACH(color_rule,
16805                               &sub_policy->color_rules[i], next_port)
16806                         if (color_rule->src_port == src_port) {
16807                                 rule_exist = true;
16808                                 break;
16809                         }
16810                 if (rule_exist)
16811                         continue;
16812                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16813                                 sizeof(struct mlx5_sub_policy_color_rule),
16814                                 0, SOCKET_ID_ANY);
16815                 if (!color_rule)
16816                         return rte_flow_error_set(error, ENOMEM,
16817                                 RTE_FLOW_ERROR_TYPE_ACTION,
16818                                 NULL, "No memory to create tag color rule.");
16819                 color_rule->src_port = src_port;
16820                 attr.priority = i;
16821                 next_policy = mlx5_flow_meter_policy_find(dev,
16822                                                 next_fm->policy_id, NULL);
16823                 MLX5_ASSERT(next_policy);
16824                 next_sub_policy = next_policy->sub_policys[domain][0];
16825                 tbl_data = container_of(next_sub_policy->tbl_rsc,
16826                                         struct mlx5_flow_tbl_data_entry, tbl);
16827                 act_cnt = &mtr_policy->act_cnt[i];
16828                 if (mtr_first) {
16829                         acts.dv_actions[0] = next_fm->meter_action;
16830                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
16831                 } else {
16832                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
16833                         acts.dv_actions[1] = next_fm->meter_action;
16834                 }
16835                 acts.dv_actions[2] = tbl_data->jump.action;
16836                 acts.actions_n = 3;
16837                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
16838                         next_fm = NULL;
16839                         goto err_exit;
16840                 }
16841                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16842                                         i, sub_policy, &attr, true, item,
16843                                         &color_rule->matcher, error)) {
16844                         rte_flow_error_set(error, errno,
16845                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16846                                 "Failed to create hierarchy meter matcher.");
16847                         goto err_exit;
16848                 }
16849                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
16850                                         (enum rte_color)i,
16851                                         color_rule->matcher->matcher_object,
16852                                         acts.actions_n, acts.dv_actions,
16853                                         true, item,
16854                                         &color_rule->rule, &attr)) {
16855                         rte_flow_error_set(error, errno,
16856                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16857                                 "Failed to create hierarchy meter rule.");
16858                         goto err_exit;
16859                 }
16860                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16861                                   color_rule, next_port);
16862         }
16863 exit:
16864         /*
16865          * Recursive call to iterate all meters in hierarchy and
16866          * create needed rules.
16867          */
16868         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
16869                                                 src_port, item, error);
16870 err_exit:
16871         if (color_rule) {
16872                 if (color_rule->rule)
16873                         mlx5_flow_os_destroy_flow(color_rule->rule);
16874                 if (color_rule->matcher) {
16875                         struct mlx5_flow_tbl_data_entry *tbl =
16876                                 container_of(color_rule->matcher->tbl,
16877                                                 typeof(*tbl), tbl);
16878                         mlx5_list_unregister(tbl->matchers,
16879                                                 &color_rule->matcher->entry);
16880                 }
16881                 mlx5_free(color_rule);
16882         }
16883         if (next_fm)
16884                 mlx5_flow_meter_detach(priv, next_fm);
16885         return -rte_errno;
16886 }
16887
16888 /**
16889  * Destroy the sub policy table with RX queue.
16890  *
16891  * @param[in] dev
16892  *   Pointer to Ethernet device.
16893  * @param[in] mtr_policy
16894  *   Pointer to meter policy table.
16895  */
16896 static void
16897 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
16898                 struct mlx5_flow_meter_policy *mtr_policy)
16899 {
16900         struct mlx5_priv *priv = dev->data->dev_private;
16901         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16902         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16903         uint32_t i, j;
16904         uint16_t sub_policy_num, new_policy_num;
16905
16906         rte_spinlock_lock(&mtr_policy->sl);
16907         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16908                 switch (mtr_policy->act_cnt[i].fate_action) {
16909                 case MLX5_FLOW_FATE_SHARED_RSS:
16910                         sub_policy_num = (mtr_policy->sub_policy_num >>
16911                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16912                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16913                         new_policy_num = sub_policy_num;
16914                         for (j = 0; j < sub_policy_num; j++) {
16915                                 sub_policy =
16916                                         mtr_policy->sub_policys[domain][j];
16917                                 if (sub_policy) {
16918                                         __flow_dv_destroy_sub_policy_rules(dev,
16919                                                 sub_policy);
16920                                         if (sub_policy !=
16921                                             mtr_policy->sub_policys[domain][0]) {
16922                                                 mtr_policy->sub_policys
16923                                                         [domain][j] = NULL;
16924                                                 mlx5_ipool_free(priv->sh->ipool
16925                                                         [MLX5_IPOOL_MTR_POLICY],
16926                                                         sub_policy->idx);
16927                                                 new_policy_num--;
16928                                         }
16929                                 }
16930                         }
16931                         if (new_policy_num != sub_policy_num) {
16932                                 mtr_policy->sub_policy_num &=
16933                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16934                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16935                                 mtr_policy->sub_policy_num |=
16936                                 (new_policy_num &
16937                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16938                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16939                         }
16940                         break;
16941                 case MLX5_FLOW_FATE_QUEUE:
16942                         sub_policy = mtr_policy->sub_policys[domain][0];
16943                         __flow_dv_destroy_sub_policy_rules(dev,
16944                                                 sub_policy);
16945                         break;
16946                 default:
16947                         /* Other fate actions without an Rx queue, nothing to do. */
16948                         break;
16949                 }
16950         }
16951         rte_spinlock_unlock(&mtr_policy->sl);
16952 }
16953
16954 /**
16955  * Validate the batch counter support in root table.
16956  *
16957  * Create a simple flow with invalid counter and drop action on root table to
16958  * validate if batch counter with offset on root table is supported or not.
16959  *
16960  * @param[in] dev
16961  *   Pointer to rte_eth_dev structure.
16962  *
16963  * @return
16964  *   0 on success, a negative errno value otherwise and rte_errno is set.
16965  */
16966 int
16967 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
16968 {
16969         struct mlx5_priv *priv = dev->data->dev_private;
16970         struct mlx5_dev_ctx_shared *sh = priv->sh;
16971         struct mlx5_flow_dv_match_params mask = {
16972                 .size = sizeof(mask.buf),
16973         };
16974         struct mlx5_flow_dv_match_params value = {
16975                 .size = sizeof(value.buf),
16976         };
16977         struct mlx5dv_flow_matcher_attr dv_attr = {
16978                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
16979                 .priority = 0,
16980                 .match_criteria_enable = 0,
16981                 .match_mask = (void *)&mask,
16982         };
16983         void *actions[2] = { 0 };
16984         struct mlx5_flow_tbl_resource *tbl = NULL;
16985         struct mlx5_devx_obj *dcs = NULL;
16986         void *matcher = NULL;
16987         void *flow = NULL;
16988         int ret = -1;
16989
16990         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
16991                                         0, 0, 0, NULL);
16992         if (!tbl)
16993                 goto err;
16994         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
16995         if (!dcs)
16996                 goto err;
16997         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
16998                                                     &actions[0]);
16999         if (ret)
17000                 goto err;
17001         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17002         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17003         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
17004                                                &matcher);
17005         if (ret)
17006                 goto err;
17007         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17008         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17009                                        actions, &flow);
17010 err:
17011         /*
17012          * If a batch counter with offset is not supported, the driver does
17013          * not validate the invalid offset value and flow creation should
17014          * succeed. In this case, batch counters are not supported in the
17015          * root table.
17016          *
17017          * Otherwise, if flow creation fails, counter offset is supported.
17017          */
17018         if (flow) {
17019                 DRV_LOG(INFO, "Batch counter is not supported in root "
17020                               "table. Switch to fallback mode.");
17021                 rte_errno = ENOTSUP;
17022                 ret = -rte_errno;
17023                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17024         } else {
17025                 /* Check matcher to make sure validate fail at flow create. */
17026                 if (!matcher || errno != EINVAL)
17027                         DRV_LOG(ERR, "Unexpected error in counter offset "
17028                                      "support detection");
17029                 ret = 0;
17030         }
17031         if (actions[0])
17032                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17033         if (matcher)
17034                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17035         if (tbl)
17036                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17037         if (dcs)
17038                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17039         return ret;
17040 }
17041
17042 /**
17043  * Query a devx counter.
17044  *
17045  * @param[in] dev
17046  *   Pointer to the Ethernet device structure.
17047  * @param[in] cnt
17048  *   Index to the flow counter.
17049  * @param[in] clear
17050  *   Set to clear the counter statistics.
17051  * @param[out] pkts
17052  *   The statistics value of packets.
17053  * @param[out] bytes
17054  *   The statistics value of bytes.
17055  *
17056  * @return
17057  *   0 on success, otherwise return -1.
17058  */
17059 static int
17060 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17061                       uint64_t *pkts, uint64_t *bytes)
17062 {
17063         struct mlx5_priv *priv = dev->data->dev_private;
17064         struct mlx5_flow_counter *cnt;
17065         uint64_t inn_pkts, inn_bytes;
17066         int ret;
17067
17068         if (!priv->config.devx)
17069                 return -1;
17070
17071         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17072         if (ret)
17073                 return -1;
17074         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
17075         *pkts = inn_pkts - cnt->hits;
17076         *bytes = inn_bytes - cnt->bytes;
17077         if (clear) {
17078                 cnt->hits = inn_pkts;
17079                 cnt->bytes = inn_bytes;
17080         }
17081         return 0;
17082 }
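
/*
 * Applications reach this through rte_flow_query() on a flow that has
 * a COUNT action. A minimal usage sketch (illustrative only; port_id
 * and flow are assumed to exist, error handling trimmed):
 *
 *   struct rte_flow_query_count qc = { .reset = 1 };
 *   struct rte_flow_action act = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_query(port_id, flow, &act, &qc, &err) == 0)
 *           printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                  qc.hits, qc.bytes);
 *
 * The .reset flag maps to the clear parameter above.
 */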
17083
17084 /**
17085  * Get aged-out flows.
17086  *
17087  * @param[in] dev
17088  *   Pointer to the Ethernet device structure.
17089  * @param[in] context
17090  *   The address of an array of pointers to the aged-out flows contexts.
17091  * @param[in] nb_contexts
17092  *   The length of context array pointers.
17093  * @param[out] error
17094  *   Perform verbose error reporting if not NULL. Initialized in case of
17095  *   error only.
17096  *
17097  * @return
17098  *   How many contexts were returned on success, otherwise a negative
17099  *   errno value. If nb_contexts is 0, return the number of all aged
17100  *   contexts. If nb_contexts is not 0, return the number of aged flows
17101  *   reported in the context array.
17103  */
17104 static int
17105 flow_get_aged_flows(struct rte_eth_dev *dev,
17106                     void **context,
17107                     uint32_t nb_contexts,
17108                     struct rte_flow_error *error)
17109 {
17110         struct mlx5_priv *priv = dev->data->dev_private;
17111         struct mlx5_age_info *age_info;
17112         struct mlx5_age_param *age_param;
17113         struct mlx5_flow_counter *counter;
17114         struct mlx5_aso_age_action *act;
17115         int nb_flows = 0;
17116
17117         if (nb_contexts && !context)
17118                 return rte_flow_error_set(error, EINVAL,
17119                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17120                                           NULL, "empty context");
17121         age_info = GET_PORT_AGE_INFO(priv);
17122         rte_spinlock_lock(&age_info->aged_sl);
17123         LIST_FOREACH(act, &age_info->aged_aso, next) {
17124                 nb_flows++;
17125                 if (nb_contexts) {
17126                         context[nb_flows - 1] =
17127                                                 act->age_params.context;
17128                         if (!(--nb_contexts))
17129                                 break;
17130                 }
17131         }
17132         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17133                 nb_flows++;
17134                 if (nb_contexts) {
17135                         age_param = MLX5_CNT_TO_AGE(counter);
17136                         context[nb_flows - 1] = age_param->context;
17137                         if (!(--nb_contexts))
17138                                 break;
17139                 }
17140         }
17141         rte_spinlock_unlock(&age_info->aged_sl);
17142         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17143         return nb_flows;
17144 }
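
/*
 * Typical application-side use of the aged-flow query (illustrative
 * only; error handling trimmed). A first call with nb_contexts == 0
 * returns the total number of aged contexts, a second call collects
 * them:
 *
 *   struct rte_flow_error err;
 *   int n = rte_flow_get_aged_flows(port_id, NULL, 0, &err);
 *
 *   if (n > 0) {
 *           void **ctx = calloc(n, sizeof(*ctx));
 *
 *           n = rte_flow_get_aged_flows(port_id, ctx, n, &err);
 *           while (n-- > 0)
 *                   handle_aged_context(ctx[n]);
 *           free(ctx);
 *   }
 *
 * handle_aged_context() stands for whatever the application does with
 * the user context, e.g. destroying the aged flow.
 */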
17145
17146 /*
17147  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17148  */
17149 static uint32_t
17150 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17151 {
17152         return flow_dv_counter_alloc(dev, 0);
17153 }
17154
17155 /**
17156  * Validate indirect action.
17157  * Dispatcher for action type specific validation.
17158  *
17159  * @param[in] dev
17160  *   Pointer to the Ethernet device structure.
17161  * @param[in] conf
17162  *   Indirect action configuration.
17163  * @param[in] action
17164  *   The indirect action object to validate.
17165  * @param[out] error
17166  *   Perform verbose error reporting if not NULL. Initialized in case of
17167  *   error only.
17168  *
17169  * @return
17170  *   0 on success, otherwise negative errno value.
17171  */
17172 static int
17173 flow_dv_action_validate(struct rte_eth_dev *dev,
17174                         const struct rte_flow_indir_action_conf *conf,
17175                         const struct rte_flow_action *action,
17176                         struct rte_flow_error *err)
17177 {
17178         struct mlx5_priv *priv = dev->data->dev_private;
17179
17180         RTE_SET_USED(conf);
17181         switch (action->type) {
17182         case RTE_FLOW_ACTION_TYPE_RSS:
17183                 /*
17184                  * priv->obj_ops is set according to driver capabilities.
17185                  * When DevX capabilities are
17186                  * sufficient, it is set to devx_obj_ops.
17187                  * Otherwise, it is set to ibv_obj_ops.
17188                  * ibv_obj_ops doesn't support ind_table_modify operation.
17189                  * In this case the indirect RSS action can't be used.
17190                  */
17191                 if (priv->obj_ops.ind_table_modify == NULL)
17192                         return rte_flow_error_set
17193                                         (err, ENOTSUP,
17194                                          RTE_FLOW_ERROR_TYPE_ACTION,
17195                                          NULL,
17196                                          "Indirect RSS action not supported");
17197                 return mlx5_validate_action_rss(dev, action, err);
17198         case RTE_FLOW_ACTION_TYPE_AGE:
17199                 if (!priv->sh->aso_age_mng)
17200                         return rte_flow_error_set(err, ENOTSUP,
17201                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17202                                                 NULL,
17203                                                 "Indirect age action not supported");
17204                 return flow_dv_validate_action_age(0, action, dev, err);
17205         case RTE_FLOW_ACTION_TYPE_COUNT:
17206                 /*
17207                  * There are two mechanisms to share the action count.
17208                  * The old mechanism uses the shared field to share, while the
17209                  * new mechanism uses the indirect action API.
17210                  * This validation comes to make sure that the two mechanisms
17211                  * are not combined.
17212                  */
17213                 if (is_shared_action_count(action))
17214                         return rte_flow_error_set(err, ENOTSUP,
17215                                                   RTE_FLOW_ERROR_TYPE_ACTION,
17216                                                   NULL,
17217                                                   "Mix shared and indirect counter is not supported");
17218                 return flow_dv_validate_action_count(dev, true, 0, err);
17219         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17220                 if (!priv->sh->ct_aso_en)
17221                         return rte_flow_error_set(err, ENOTSUP,
17222                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17223                                         "ASO CT is not supported");
17224                 return mlx5_validate_action_ct(dev, action->conf, err);
17225         default:
17226                 return rte_flow_error_set(err, ENOTSUP,
17227                                           RTE_FLOW_ERROR_TYPE_ACTION,
17228                                           NULL,
17229                                           "action type not supported");
17230         }
17231 }
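
/*
 * The checks above run when an application creates an indirect action.
 * A minimal sketch for an indirect RSS action (illustrative only;
 * port_id and the queue set are assumptions):
 *
 *   uint16_t queues[] = { 0, 1, 2, 3 };
 *   struct rte_flow_action_rss rss = {
 *           .types = ETH_RSS_IP,
 *           .queue_num = RTE_DIM(queues),
 *           .queue = queues,
 *   };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_RSS,
 *           .conf = &rss,
 *   };
 *   struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *   struct rte_flow_error err;
 *   struct rte_flow_action_handle *handle =
 *           rte_flow_action_handle_create(port_id, &conf, &action, &err);
 */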
17232
17233 /**
17234  * Validate the meter hierarchy chain for meter policy.
17235  *
17236  * @param[in] dev
17237  *   Pointer to the Ethernet device structure.
17238  * @param[in] meter_id
17239  *   Meter id.
17240  * @param[in] action_flags
17241  *   Holds the actions detected until now.
17242  * @param[out] is_rss
17243  *   Is RSS or not.
17244  * @param[out] hierarchy_domain
17245  *   The domain bitmap for hierarchy policy.
17246  * @param[out] error
17247  *   Perform verbose error reporting if not NULL. Initialized in case of
17248  *   error only.
17249  *
17250  * @return
17251  *   0 on success, otherwise negative errno value with error set.
17252  */
17253 static int
17254 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17255                                   uint32_t meter_id,
17256                                   uint64_t action_flags,
17257                                   bool *is_rss,
17258                                   uint8_t *hierarchy_domain,
17259                                   struct rte_mtr_error *error)
17260 {
17261         struct mlx5_priv *priv = dev->data->dev_private;
17262         struct mlx5_flow_meter_info *fm;
17263         struct mlx5_flow_meter_policy *policy;
17264         uint8_t cnt = 1;
17265
17266         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17267                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17268                 return -rte_mtr_error_set(error, EINVAL,
17269                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17270                                         NULL,
17271                                         "Multiple fate actions not supported.");
17272         while (true) {
17273                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17274                 if (!fm)
17275                         return -rte_mtr_error_set(error, EINVAL,
17276                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17277                                         "Meter not found in meter hierarchy.");
17278                 if (fm->def_policy)
17279                         return -rte_mtr_error_set(error, EINVAL,
17280                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17281                         "Non termination meter not supported in hierarchy.");
17282                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17283                 MLX5_ASSERT(policy);
17284                 if (!policy->is_hierarchy) {
17285                         if (policy->transfer)
17286                                 *hierarchy_domain |=
17287                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17288                         if (policy->ingress)
17289                                 *hierarchy_domain |=
17290                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17291                         if (policy->egress)
17292                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17293                         *is_rss = policy->is_rss;
17294                         break;
17295                 }
17296                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17297                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17298                         return -rte_mtr_error_set(error, EINVAL,
17299                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17300                                         "Exceed max hierarchy meter number.");
17301         }
17302         return 0;
17303 }
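
/*
 * A hierarchy as validated above is built by pointing the GREEN color
 * of a policy at the next meter. A minimal sketch using the rte_mtr
 * API (illustrative only; POLICY_ID and NEXT_METER_ID are placeholder
 * values):
 *
 *   struct rte_flow_action_meter next = { .mtr_id = NEXT_METER_ID };
 *   const struct rte_flow_action green[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &next },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   const struct rte_flow_action red[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_mtr_meter_policy_params params = {
 *           .actions = { [RTE_COLOR_GREEN] = green,
 *                        [RTE_COLOR_RED] = red },
 *   };
 *   struct rte_mtr_error mtr_err;
 *
 *   rte_mtr_meter_policy_add(port_id, POLICY_ID, &params, &mtr_err);
 */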
17304
17305 /**
17306  * Validate meter policy actions.
17307  * Dispatcher for action type specific validation.
17308  *
17309  * @param[in] dev
17310  *   Pointer to the Ethernet device structure.
17311  * @param[in] action
17312  *   The meter policy action object to validate.
17313  * @param[in] attr
17314  *   Attributes of flow to determine steering domain.
17315  * @param[out] error
17316  *   Perform verbose error reporting if not NULL. Initialized in case of
17317  *   error only.
17318  *
17319  * @return
17320  *   0 on success, otherwise negative errno value.
17321  */
17322 static int
17323 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17324                         const struct rte_flow_action *actions[RTE_COLORS],
17325                         struct rte_flow_attr *attr,
17326                         bool *is_rss,
17327                         uint8_t *domain_bitmap,
17328                         bool *is_def_policy,
17329                         struct rte_mtr_error *error)
17330 {
17331         struct mlx5_priv *priv = dev->data->dev_private;
17332         struct mlx5_dev_config *dev_conf = &priv->config;
17333         const struct rte_flow_action *act;
17334         uint64_t action_flags = 0;
17335         int actions_n;
17336         int i, ret;
17337         struct rte_flow_error flow_err;
17338         uint8_t domain_color[RTE_COLORS] = {0};
17339         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17340         uint8_t hierarchy_domain = 0;
17341         const struct rte_flow_action_meter *mtr;
17342
17343         if (!priv->config.dv_esw_en)
17344                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17345         *domain_bitmap = def_domain;
17346         if (actions[RTE_COLOR_YELLOW] &&
17347                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
17348                 return -rte_mtr_error_set(error, ENOTSUP,
17349                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17350                                 NULL,
17351                                 "Yellow color does not support any action.");
17352         if (actions[RTE_COLOR_RED] &&
17353                 actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17354                 return -rte_mtr_error_set(error, ENOTSUP,
17355                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17356                                 NULL, "Red color only supports drop action.");
17357         /*
17358          * Check default policy actions:
17359          * Green/Yellow: no action, Red: drop action
17360          */
17361         if ((!actions[RTE_COLOR_GREEN] ||
17362                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
17363                 *is_def_policy = true;
17364                 return 0;
17365         }
17366         flow_err.message = NULL;
17367         for (i = 0; i < RTE_COLORS; i++) {
17368                 act = actions[i];
17369                 for (action_flags = 0, actions_n = 0;
17370                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
17371                         act++) {
17372                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17373                                 return -rte_mtr_error_set(error, ENOTSUP,
17374                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17375                                           NULL, "too many actions");
17376                         switch (act->type) {
17377                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17378                                 if (!priv->config.dv_esw_en)
17379                                         return -rte_mtr_error_set(error,
17380                                         ENOTSUP,
17381                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17382                                         NULL, "PORT action validation"
17383                                         " fails when E-Switch is disabled");
17384                                 ret = flow_dv_validate_action_port_id(dev,
17385                                                 action_flags,
17386                                                 act, attr, &flow_err);
17387                                 if (ret)
17388                                         return -rte_mtr_error_set(error,
17389                                         ENOTSUP,
17390                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17391                                         NULL, flow_err.message ?
17392                                         flow_err.message :
17393                                         "PORT action validation failed");
17394                                 ++actions_n;
17395                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
17396                                 break;
17397                         case RTE_FLOW_ACTION_TYPE_MARK:
17398                                 ret = flow_dv_validate_action_mark(dev, act,
17399                                                            action_flags,
17400                                                            attr, &flow_err);
17401                                 if (ret < 0)
17402                                         return -rte_mtr_error_set(error,
17403                                         ENOTSUP,
17404                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17405                                         NULL, flow_err.message ?
17406                                         flow_err.message :
17407                                         "Mark action validation failed");
17408                                 if (dev_conf->dv_xmeta_en !=
17409                                         MLX5_XMETA_MODE_LEGACY)
17410                                         return -rte_mtr_error_set(error,
17411                                         ENOTSUP,
17412                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17413                                         NULL, "Extend MARK action is "
17414                                         "not supported. Please try use "
17415                                         "default policy for meter.");
17416                                 action_flags |= MLX5_FLOW_ACTION_MARK;
17417                                 ++actions_n;
17418                                 break;
                        case RTE_FLOW_ACTION_TYPE_SET_TAG:
                                ret = flow_dv_validate_action_set_tag(dev,
                                                        act, action_flags,
                                                        attr, &flow_err);
                                if (ret)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, flow_err.message ?
                                        flow_err.message :
                                        "Set tag action validation check failed");
                                /*
                                 * Count all modify-header actions
                                 * as one action.
                                 */
                                if (!(action_flags &
                                        MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                        ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_SET_TAG;
                                break;
                        case RTE_FLOW_ACTION_TYPE_DROP:
                                ret = mlx5_flow_validate_action_drop
                                        (action_flags,
                                        attr, &flow_err);
                                if (ret < 0)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, flow_err.message ?
                                        flow_err.message :
                                        "Drop action validation check failed");
                                action_flags |= MLX5_FLOW_ACTION_DROP;
                                ++actions_n;
                                break;
                        case RTE_FLOW_ACTION_TYPE_QUEUE:
                                /*
                                 * Check whether the extensive
                                 * metadata feature is engaged.
                                 */
                                if (dev_conf->dv_flow_en &&
                                        (dev_conf->dv_xmeta_en !=
                                        MLX5_XMETA_MODE_LEGACY) &&
                                        mlx5_flow_ext_mreg_supported(dev))
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, "Queue action with metadata "
                                          "is not supported. Please try using "
                                          "the default policy for the meter.");
                                ret = mlx5_flow_validate_action_queue(act,
                                                        action_flags, dev,
                                                        attr, &flow_err);
                                if (ret < 0)
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, flow_err.message ?
                                          flow_err.message :
                                          "Queue action validation check failed");
                                action_flags |= MLX5_FLOW_ACTION_QUEUE;
                                ++actions_n;
                                break;
                        case RTE_FLOW_ACTION_TYPE_RSS:
                                if (dev_conf->dv_flow_en &&
                                        (dev_conf->dv_xmeta_en !=
                                        MLX5_XMETA_MODE_LEGACY) &&
                                        mlx5_flow_ext_mreg_supported(dev))
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, "RSS action with metadata "
                                          "is not supported. Please try using "
                                          "the default policy for the meter.");
                                ret = mlx5_validate_action_rss(dev, act,
                                                &flow_err);
                                if (ret < 0)
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, flow_err.message ?
                                          flow_err.message :
                                          "RSS action validation check failed");
                                action_flags |= MLX5_FLOW_ACTION_RSS;
                                ++actions_n;
                                *is_rss = true;
                                break;
                        case RTE_FLOW_ACTION_TYPE_JUMP:
                                ret = flow_dv_validate_action_jump(dev,
                                        NULL, act, action_flags,
                                        attr, true, &flow_err);
                                if (ret)
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, flow_err.message ?
                                          flow_err.message :
                                          "Jump action validation check failed");
                                ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_JUMP;
                                break;
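                        /*
                         * A METER action inside a policy creates a meter
                         * hierarchy and may terminate only the green color.
                         */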
                        case RTE_FLOW_ACTION_TYPE_METER:
                                if (i != RTE_COLOR_GREEN)
                                        return -rte_mtr_error_set(error,
                                                ENOTSUP,
                                                RTE_MTR_ERROR_TYPE_METER_POLICY,
                                                NULL, "Meter hierarchy only "
                                                "supports the green color.");
                                mtr = act->conf;
                                ret = flow_dv_validate_policy_mtr_hierarchy(dev,
                                                        mtr->mtr_id,
                                                        action_flags,
                                                        is_rss,
                                                        &hierarchy_domain,
                                                        error);
                                if (ret)
                                        return ret;
                                ++actions_n;
                                action_flags |=
                                MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
                                break;
                        default:
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL,
                                        "action type is not supported");
                        }
                }
                /* Yellow is not supported, just skip. */
                if (i == RTE_COLOR_YELLOW)
                        continue;
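                /* Deduce the domain this color's actions can apply to. */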
                if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                        domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
                else if ((action_flags &
                        (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
                        (action_flags & MLX5_FLOW_ACTION_MARK))
                        /*
                         * Only MLX5_XMETA_MODE_LEGACY is supported, so
                         * the MARK action is valid only in the ingress
                         * domain.
                         */
                        domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
                else if (action_flags &
                        MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
                        domain_color[i] = hierarchy_domain;
                else
                        domain_color[i] = def_domain;
                /*
                 * Validate that the drop action is not combined with any
                 * other action; it is mutually exclusive with everything
                 * except the count action.
                 */
                if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
                        (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
                        return -rte_mtr_error_set(error, ENOTSUP,
                                RTE_MTR_ERROR_TYPE_METER_POLICY,
                                NULL, "Drop action is mutually exclusive "
                                "with any other action");
                }
                /* The E-Switch domain restricts the usable actions. */
                if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
                        if (!mlx5_flow_ext_mreg_supported(dev) &&
                                action_flags & MLX5_FLOW_ACTION_MARK)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action MARK");
                        if (action_flags & MLX5_FLOW_ACTION_QUEUE)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action QUEUE");
                        if (action_flags & MLX5_FLOW_ACTION_RSS)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action RSS");
                        if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "no fate action is found");
                } else {
                        if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
                                (domain_color[i] &
                                MLX5_MTR_DOMAIN_INGRESS_BIT)) {
                                if ((domain_color[i] &
                                        MLX5_MTR_DOMAIN_EGRESS_BIT))
                                        domain_color[i] =
                                        MLX5_MTR_DOMAIN_EGRESS_BIT;
                                else
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "no fate action is found");
                        }
                }
                if (domain_color[i] != def_domain)
                        *domain_bitmap = domain_color[i];
        }
        return 0;
}

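/**
 * Synchronize the flow domains selected in the domain bitmap by calling
 * mlx5_os_flow_dr_sync_domain() on each domain that exists. Exposed through
 * the .sync_domain driver callback (used by rte_pmd_mlx5_sync_flow()).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] domains
 *   Bitmap of MLX5_DOMAIN_BIT_* values selecting the domains to sync.
 * @param[in] flags
 *   Flags passed through to mlx5_os_flow_dr_sync_domain().
 *
 * @return
 *   0 on success, the value returned by the first failing
 *   mlx5_os_flow_dr_sync_domain() call otherwise.
 */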
static int
flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret = 0;

        if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
                                                flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
                if (ret != 0)
                        return ret;
        }
        return 0;
}

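/* Flow driver callbacks for the Direct Verbs (DV) engine. */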
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_dv_translate,
        .apply = flow_dv_apply,
        .remove = flow_dv_remove,
        .destroy = flow_dv_destroy,
        .query = flow_dv_query,
        .create_mtr_tbls = flow_dv_create_mtr_tbls,
        .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
        .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
        .create_meter = flow_dv_mtr_alloc,
        .free_meter = flow_dv_aso_mtr_release_to_pool,
        .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
        .create_mtr_acts = flow_dv_create_mtr_policy_acts,
        .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
        .create_policy_rules = flow_dv_create_policy_rules,
        .destroy_policy_rules = flow_dv_destroy_policy_rules,
        .create_def_policy = flow_dv_create_def_policy,
        .destroy_def_policy = flow_dv_destroy_def_policy,
        .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
        .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
        .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
        .counter_alloc = flow_dv_counter_allocate,
        .counter_free = flow_dv_counter_free,
        .counter_query = flow_dv_counter_query,
        .get_aged_flows = flow_get_aged_flows,
        .action_validate = flow_dv_action_validate,
        .action_create = flow_dv_action_create,
        .action_destroy = flow_dv_action_destroy,
        .action_update = flow_dv_action_update,
        .action_query = flow_dv_action_query,
        .sync_domain = flow_dv_sync_domain,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
