net/mlx5: fix modify field action order for MAC
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24 #include <rte_mtr.h>
25 #include <rte_mtr_driver.h>
26
27 #include <mlx5_glue.h>
28 #include <mlx5_devx_cmds.h>
29 #include <mlx5_prm.h>
30 #include <mlx5_malloc.h>
31
32 #include "mlx5_defs.h"
33 #include "mlx5.h"
34 #include "mlx5_common_os.h"
35 #include "mlx5_flow.h"
36 #include "mlx5_flow_os.h"
37 #include "mlx5_rx.h"
38 #include "mlx5_tx.h"
39 #include "rte_pmd_mlx5.h"
40
41 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
42
43 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
44 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
45 #endif
46
47 #ifndef HAVE_MLX5DV_DR_ESWITCH
48 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
49 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
50 #endif
51 #endif
52
53 #ifndef HAVE_MLX5DV_DR
54 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
55 #endif
56
57 /* VLAN header definitions */
58 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
59 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
60 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
61 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
62 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
63
/*
 * Aggregated L3/L4 layer flags of a flow, used to pick the proper
 * protocol header table for modify-header actions.
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1; /* Set once the attributes are initialized. */
		uint32_t ipv4:1; /* Relevant L3 layer is IPv4. */
		uint32_t ipv6:1; /* Relevant L3 layer is IPv6. */
		uint32_t tcp:1; /* Relevant L4 layer is TCP. */
		uint32_t udp:1; /* Relevant L4 layer is UDP. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* All flags as one word, allows bulk reset. */
};
75
76 static int
77 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
78                              struct mlx5_flow_tbl_resource *tbl);
79
80 static int
81 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
82                                      uint32_t encap_decap_idx);
83
84 static int
85 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
86                                         uint32_t port_id);
87 static void
88 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
89
90 static int
91 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
92                                   uint32_t rix_jump);
93
94 /**
95  * Initialize flow attributes structure according to flow items' types.
96  *
97  * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
98  * mode. For tunnel mode, the items to be modified are the outermost ones.
99  *
100  * @param[in] item
101  *   Pointer to item specification.
102  * @param[out] attr
103  *   Pointer to flow attributes structure.
104  * @param[in] dev_flow
105  *   Pointer to the sub flow.
106  * @param[in] tunnel_decap
107  *   Whether action is after tunnel decapsulation.
108  */
109 static void
110 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
111                   struct mlx5_flow *dev_flow, bool tunnel_decap)
112 {
113         uint64_t layers = dev_flow->handle->layers;
114
115         /*
116          * If layers is already initialized, it means this dev_flow is the
117          * suffix flow, the layers flags is set by the prefix flow. Need to
118          * use the layer flags from prefix flow as the suffix flow may not
119          * have the user defined items as the flow is split.
120          */
121         if (layers) {
122                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
123                         attr->ipv4 = 1;
124                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
125                         attr->ipv6 = 1;
126                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
127                         attr->tcp = 1;
128                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
129                         attr->udp = 1;
130                 attr->valid = 1;
131                 return;
132         }
133         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
134                 uint8_t next_protocol = 0xff;
135                 switch (item->type) {
136                 case RTE_FLOW_ITEM_TYPE_GRE:
137                 case RTE_FLOW_ITEM_TYPE_NVGRE:
138                 case RTE_FLOW_ITEM_TYPE_VXLAN:
139                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
140                 case RTE_FLOW_ITEM_TYPE_GENEVE:
141                 case RTE_FLOW_ITEM_TYPE_MPLS:
142                         if (tunnel_decap)
143                                 attr->attr = 0;
144                         break;
145                 case RTE_FLOW_ITEM_TYPE_IPV4:
146                         if (!attr->ipv6)
147                                 attr->ipv4 = 1;
148                         if (item->mask != NULL &&
149                             ((const struct rte_flow_item_ipv4 *)
150                             item->mask)->hdr.next_proto_id)
151                                 next_protocol =
152                                     ((const struct rte_flow_item_ipv4 *)
153                                       (item->spec))->hdr.next_proto_id &
154                                     ((const struct rte_flow_item_ipv4 *)
155                                       (item->mask))->hdr.next_proto_id;
156                         if ((next_protocol == IPPROTO_IPIP ||
157                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
158                                 attr->attr = 0;
159                         break;
160                 case RTE_FLOW_ITEM_TYPE_IPV6:
161                         if (!attr->ipv4)
162                                 attr->ipv6 = 1;
163                         if (item->mask != NULL &&
164                             ((const struct rte_flow_item_ipv6 *)
165                             item->mask)->hdr.proto)
166                                 next_protocol =
167                                     ((const struct rte_flow_item_ipv6 *)
168                                       (item->spec))->hdr.proto &
169                                     ((const struct rte_flow_item_ipv6 *)
170                                       (item->mask))->hdr.proto;
171                         if ((next_protocol == IPPROTO_IPIP ||
172                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
173                                 attr->attr = 0;
174                         break;
175                 case RTE_FLOW_ITEM_TYPE_UDP:
176                         if (!attr->tcp)
177                                 attr->udp = 1;
178                         break;
179                 case RTE_FLOW_ITEM_TYPE_TCP:
180                         if (!attr->udp)
181                                 attr->tcp = 1;
182                         break;
183                 default:
184                         break;
185                 }
186         }
187         attr->valid = 1;
188 }
189
190 /**
191  * Convert rte_mtr_color to mlx5 color.
192  *
193  * @param[in] rcol
194  *   rte_mtr_color.
195  *
196  * @return
197  *   mlx5 color.
198  */
199 static int
200 rte_col_2_mlx5_col(enum rte_color rcol)
201 {
202         switch (rcol) {
203         case RTE_COLOR_GREEN:
204                 return MLX5_FLOW_COLOR_GREEN;
205         case RTE_COLOR_YELLOW:
206                 return MLX5_FLOW_COLOR_YELLOW;
207         case RTE_COLOR_RED:
208                 return MLX5_FLOW_COLOR_RED;
209         default:
210                 break;
211         }
212         return MLX5_FLOW_COLOR_UNDEFINED;
213 }
214
/*
 * Describes one protocol-header field for modify-header action conversion.
 * Tables of these entries are terminated by an all-zero record.
 * NOTE: sizes/offsets are in bytes for all tables below except
 * modify_vlan_out_first_vid, which is expressed in bits.
 */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* HW modification field ID. */
};
220
/*
 * Ethernet header fields: DMAC occupies bytes 0-5, SMAC bytes 6-11.
 * Each 48-bit MAC is split into a 32-bit high part (bits 47-16) and
 * a 16-bit low part (bits 15-0) to match the HW field granularity.
 */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0}, /* End marker. */
};
228
/* Outermost VLAN VID field - unlike other tables here, sized in bits. */
struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0}, /* End marker. */
};
234
/* IPv4 header fields, byte offsets per RFC 791 layout. */
struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP}, /* TOS byte. */
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4}, /* Source address. */
	{4, 16, MLX5_MODI_OUT_DIPV4}, /* Destination address. */
	{0, 0, 0}, /* End marker. */
};
242
/*
 * IPv6 header fields, byte offsets per RFC 8200 layout. The 128-bit
 * addresses are split into four 32-bit HW fields each, most significant
 * word first (bytes 8-23 source, bytes 24-39 destination).
 */
struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP}, /* Traffic class (high byte). */
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0}, /* End marker. */
};
256
/* UDP header fields: source and destination ports. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0}, /* End marker. */
};
262
/* TCP header fields: ports, sequence and acknowledgment numbers. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0}, /* End marker. */
};
270
271 static const struct rte_flow_item *
272 mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
273 {
274         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
275                 switch (item->type) {
276                 default:
277                         break;
278                 case RTE_FLOW_ITEM_TYPE_VXLAN:
279                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
280                 case RTE_FLOW_ITEM_TYPE_GRE:
281                 case RTE_FLOW_ITEM_TYPE_MPLS:
282                 case RTE_FLOW_ITEM_TYPE_NVGRE:
283                 case RTE_FLOW_ITEM_TYPE_GENEVE:
284                         return item;
285                 case RTE_FLOW_ITEM_TYPE_IPV4:
286                 case RTE_FLOW_ITEM_TYPE_IPV6:
287                         if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
288                             item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
289                                 return item;
290                         break;
291                 }
292         }
293         return NULL;
294 }
295
296 static void
297 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
298                           uint8_t next_protocol, uint64_t *item_flags,
299                           int *tunnel)
300 {
301         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
302                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
303         if (next_protocol == IPPROTO_IPIP) {
304                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
305                 *tunnel = 1;
306         }
307         if (next_protocol == IPPROTO_IPV6) {
308                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
309                 *tunnel = 1;
310         }
311 }
312
313 /* Update VLAN's VID/PCP based on input rte_flow_action.
314  *
315  * @param[in] action
316  *   Pointer to struct rte_flow_action.
317  * @param[out] vlan
318  *   Pointer to struct rte_vlan_hdr.
319  */
320 static void
321 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
322                          struct rte_vlan_hdr *vlan)
323 {
324         uint16_t vlan_tci;
325         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
326                 vlan_tci =
327                     ((const struct rte_flow_action_of_set_vlan_pcp *)
328                                                action->conf)->vlan_pcp;
329                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
330                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
331                 vlan->vlan_tci |= vlan_tci;
332         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
333                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
334                 vlan->vlan_tci |= rte_be_to_cpu_16
335                     (((const struct rte_flow_action_of_set_vlan_vid *)
336                                              action->conf)->vlan_vid);
337         }
338 }
339
340 /**
341  * Fetch 1, 2, 3 or 4 byte field from the byte array
342  * and return as unsigned integer in host-endian format.
343  *
344  * @param[in] data
345  *   Pointer to data array.
346  * @param[in] size
347  *   Size of field to extract.
348  *
349  * @return
350  *   converted field in host endian format.
351  */
352 static inline uint32_t
353 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
354 {
355         uint32_t ret;
356
357         switch (size) {
358         case 1:
359                 ret = *data;
360                 break;
361         case 2:
362                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
363                 break;
364         case 3:
365                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
366                 ret = (ret << 8) | *(data + sizeof(uint16_t));
367                 break;
368         case 4:
369                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
370                 break;
371         default:
372                 MLX5_ASSERT(false);
373                 ret = 0;
374                 break;
375         }
376         return ret;
377 }
378
379 /**
380  * Convert modify-header action to DV specification.
381  *
382  * Data length of each action is determined by provided field description
383  * and the item mask. Data bit offset and width of each action is determined
384  * by provided item mask.
385  *
386  * @param[in] item
387  *   Pointer to item specification.
388  * @param[in] field
389  *   Pointer to field modification information.
390  *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
391  *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
392  *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
393  * @param[in] dcopy
394  *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
395  *   Negative offset value sets the same offset as source offset.
396  *   size field is ignored, value is taken from source field.
397  * @param[in,out] resource
398  *   Pointer to the modify-header resource.
399  * @param[in] type
400  *   Type of modification.
401  * @param[out] error
402  *   Pointer to the error structure.
403  *
404  * @return
405  *   0 on success, a negative errno value otherwise and rte_errno is set.
406  */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	/* Append commands after any already accumulated in the resource. */
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented as in big-endian format either.
	 * Mask must be always present, it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;
		/* Whether to advance the source/destination cursor below. */
		bool next_field = true;
		bool next_dcopy = true;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		if (!mask) {
			/* Zero mask - this field is not being modified. */
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			/* Full-word length is encoded as zero. */
			.length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
				0 : size_b,
		};
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
			/*
			 * Destination field overflow. Copy leftovers of
			 * a source field to the next destination field.
			 */
			if ((size_b > dcopy->size * CHAR_BIT) && dcopy->size) {
				actions[i].length = dcopy->size * CHAR_BIT;
				field->offset += dcopy->size;
				next_field = false;
			}
			/*
			 * Not enough bits in a source filed to fill a
			 * destination field. Switch to the next source.
			 */
			if (dcopy->size > field->size &&
			    (size_b == field->size * CHAR_BIT)) {
				actions[i].length = field->size * CHAR_BIT;
				dcopy->offset += field->size * CHAR_BIT;
				next_dcopy = false;
			}
			if (next_dcopy)
				++dcopy;
		} else {
			/* SET/ADD need an immediate value from the spec. */
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (next_field)
			++field;
		++i;
	} while (field->size);
	/* At least one command must have been produced. */
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
504
505 /**
506  * Convert modify-header set IPv4 address action to DV specification.
507  *
508  * @param[in,out] resource
509  *   Pointer to the modify-header resource.
510  * @param[in] action
511  *   Pointer to action specification.
512  * @param[out] error
513  *   Pointer to the error structure.
514  *
515  * @return
516  *   0 on success, a negative errno value otherwise and rte_errno is set.
517  */
518 static int
519 flow_dv_convert_action_modify_ipv4
520                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
521                          const struct rte_flow_action *action,
522                          struct rte_flow_error *error)
523 {
524         const struct rte_flow_action_set_ipv4 *conf =
525                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
526         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
527         struct rte_flow_item_ipv4 ipv4;
528         struct rte_flow_item_ipv4 ipv4_mask;
529
530         memset(&ipv4, 0, sizeof(ipv4));
531         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
532         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
533                 ipv4.hdr.src_addr = conf->ipv4_addr;
534                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
535         } else {
536                 ipv4.hdr.dst_addr = conf->ipv4_addr;
537                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
538         }
539         item.spec = &ipv4;
540         item.mask = &ipv4_mask;
541         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
542                                              MLX5_MODIFICATION_TYPE_SET, error);
543 }
544
545 /**
546  * Convert modify-header set IPv6 address action to DV specification.
547  *
548  * @param[in,out] resource
549  *   Pointer to the modify-header resource.
550  * @param[in] action
551  *   Pointer to action specification.
552  * @param[out] error
553  *   Pointer to the error structure.
554  *
555  * @return
556  *   0 on success, a negative errno value otherwise and rte_errno is set.
557  */
558 static int
559 flow_dv_convert_action_modify_ipv6
560                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
561                          const struct rte_flow_action *action,
562                          struct rte_flow_error *error)
563 {
564         const struct rte_flow_action_set_ipv6 *conf =
565                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
566         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
567         struct rte_flow_item_ipv6 ipv6;
568         struct rte_flow_item_ipv6 ipv6_mask;
569
570         memset(&ipv6, 0, sizeof(ipv6));
571         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
572         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
573                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
574                        sizeof(ipv6.hdr.src_addr));
575                 memcpy(&ipv6_mask.hdr.src_addr,
576                        &rte_flow_item_ipv6_mask.hdr.src_addr,
577                        sizeof(ipv6.hdr.src_addr));
578         } else {
579                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
580                        sizeof(ipv6.hdr.dst_addr));
581                 memcpy(&ipv6_mask.hdr.dst_addr,
582                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
583                        sizeof(ipv6.hdr.dst_addr));
584         }
585         item.spec = &ipv6;
586         item.mask = &ipv6_mask;
587         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
588                                              MLX5_MODIFICATION_TYPE_SET, error);
589 }
590
591 /**
592  * Convert modify-header set MAC address action to DV specification.
593  *
594  * @param[in,out] resource
595  *   Pointer to the modify-header resource.
596  * @param[in] action
597  *   Pointer to action specification.
598  * @param[out] error
599  *   Pointer to the error structure.
600  *
601  * @return
602  *   0 on success, a negative errno value otherwise and rte_errno is set.
603  */
604 static int
605 flow_dv_convert_action_modify_mac
606                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
607                          const struct rte_flow_action *action,
608                          struct rte_flow_error *error)
609 {
610         const struct rte_flow_action_set_mac *conf =
611                 (const struct rte_flow_action_set_mac *)(action->conf);
612         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
613         struct rte_flow_item_eth eth;
614         struct rte_flow_item_eth eth_mask;
615
616         memset(&eth, 0, sizeof(eth));
617         memset(&eth_mask, 0, sizeof(eth_mask));
618         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
619                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
620                        sizeof(eth.src.addr_bytes));
621                 memcpy(&eth_mask.src.addr_bytes,
622                        &rte_flow_item_eth_mask.src.addr_bytes,
623                        sizeof(eth_mask.src.addr_bytes));
624         } else {
625                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
626                        sizeof(eth.dst.addr_bytes));
627                 memcpy(&eth_mask.dst.addr_bytes,
628                        &rte_flow_item_eth_mask.dst.addr_bytes,
629                        sizeof(eth_mask.dst.addr_bytes));
630         }
631         item.spec = &eth;
632         item.mask = &eth_mask;
633         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
634                                              MLX5_MODIFICATION_TYPE_SET, error);
635 }
636
637 /**
638  * Convert modify-header set VLAN VID action to DV specification.
639  *
640  * @param[in,out] resource
641  *   Pointer to the modify-header resource.
642  * @param[in] action
643  *   Pointer to action specification.
644  * @param[out] error
645  *   Pointer to the error structure.
646  *
647  * @return
648  *   0 on success, a negative errno value otherwise and rte_errno is set.
649  */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		/* NOTE: this table expresses size/offset in bits. */
		.length = field->size,
		.offset = field->offset,
	};
	/* Convert the control word to big-endian format. */
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	/*
	 * vlan_vid comes in network byte order; placing it in the upper
	 * 16 bits of data1 without a host-to-BE conversion appears
	 * intentional here (unlike data0) - NOTE(review): confirm against
	 * the device modify-header command layout.
	 */
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}
678
679 /**
680  * Convert modify-header set TP action to DV specification.
681  *
682  * @param[in,out] resource
683  *   Pointer to the modify-header resource.
684  * @param[in] action
685  *   Pointer to action specification.
686  * @param[in] items
687  *   Pointer to rte_flow_item objects list.
688  * @param[in] attr
689  *   Pointer to flow attributes structure.
690  * @param[in] dev_flow
691  *   Pointer to the sub flow.
692  * @param[in] tunnel_decap
693  *   Whether action is after tunnel decapsulation.
694  * @param[out] error
695  *   Pointer to the error structure.
696  *
697  * @return
698  *   0 on success, a negative errno value otherwise and rte_errno is set.
699  */
700 static int
701 flow_dv_convert_action_modify_tp
702                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
703                          const struct rte_flow_action *action,
704                          const struct rte_flow_item *items,
705                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
706                          bool tunnel_decap, struct rte_flow_error *error)
707 {
708         const struct rte_flow_action_set_tp *conf =
709                 (const struct rte_flow_action_set_tp *)(action->conf);
710         struct rte_flow_item item;
711         struct rte_flow_item_udp udp;
712         struct rte_flow_item_udp udp_mask;
713         struct rte_flow_item_tcp tcp;
714         struct rte_flow_item_tcp tcp_mask;
715         struct field_modify_info *field;
716
717         if (!attr->valid)
718                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
719         if (attr->udp) {
720                 memset(&udp, 0, sizeof(udp));
721                 memset(&udp_mask, 0, sizeof(udp_mask));
722                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
723                         udp.hdr.src_port = conf->port;
724                         udp_mask.hdr.src_port =
725                                         rte_flow_item_udp_mask.hdr.src_port;
726                 } else {
727                         udp.hdr.dst_port = conf->port;
728                         udp_mask.hdr.dst_port =
729                                         rte_flow_item_udp_mask.hdr.dst_port;
730                 }
731                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
732                 item.spec = &udp;
733                 item.mask = &udp_mask;
734                 field = modify_udp;
735         } else {
736                 MLX5_ASSERT(attr->tcp);
737                 memset(&tcp, 0, sizeof(tcp));
738                 memset(&tcp_mask, 0, sizeof(tcp_mask));
739                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
740                         tcp.hdr.src_port = conf->port;
741                         tcp_mask.hdr.src_port =
742                                         rte_flow_item_tcp_mask.hdr.src_port;
743                 } else {
744                         tcp.hdr.dst_port = conf->port;
745                         tcp_mask.hdr.dst_port =
746                                         rte_flow_item_tcp_mask.hdr.dst_port;
747                 }
748                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
749                 item.spec = &tcp;
750                 item.mask = &tcp_mask;
751                 field = modify_tcp;
752         }
753         return flow_dv_convert_modify_action(&item, field, NULL, resource,
754                                              MLX5_MODIFICATION_TYPE_SET, error);
755 }
756
757 /**
758  * Convert modify-header set TTL action to DV specification.
759  *
760  * @param[in,out] resource
761  *   Pointer to the modify-header resource.
762  * @param[in] action
763  *   Pointer to action specification.
764  * @param[in] items
765  *   Pointer to rte_flow_item objects list.
766  * @param[in] attr
767  *   Pointer to flow attributes structure.
768  * @param[in] dev_flow
769  *   Pointer to the sub flow.
770  * @param[in] tunnel_decap
771  *   Whether action is after tunnel decapsulation.
772  * @param[out] error
773  *   Pointer to the error structure.
774  *
775  * @return
776  *   0 on success, a negative errno value otherwise and rte_errno is set.
777  */
778 static int
779 flow_dv_convert_action_modify_ttl
780                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
781                          const struct rte_flow_action *action,
782                          const struct rte_flow_item *items,
783                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
784                          bool tunnel_decap, struct rte_flow_error *error)
785 {
786         const struct rte_flow_action_set_ttl *conf =
787                 (const struct rte_flow_action_set_ttl *)(action->conf);
788         struct rte_flow_item item;
789         struct rte_flow_item_ipv4 ipv4;
790         struct rte_flow_item_ipv4 ipv4_mask;
791         struct rte_flow_item_ipv6 ipv6;
792         struct rte_flow_item_ipv6 ipv6_mask;
793         struct field_modify_info *field;
794
795         if (!attr->valid)
796                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
797         if (attr->ipv4) {
798                 memset(&ipv4, 0, sizeof(ipv4));
799                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
800                 ipv4.hdr.time_to_live = conf->ttl_value;
801                 ipv4_mask.hdr.time_to_live = 0xFF;
802                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
803                 item.spec = &ipv4;
804                 item.mask = &ipv4_mask;
805                 field = modify_ipv4;
806         } else {
807                 MLX5_ASSERT(attr->ipv6);
808                 memset(&ipv6, 0, sizeof(ipv6));
809                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
810                 ipv6.hdr.hop_limits = conf->ttl_value;
811                 ipv6_mask.hdr.hop_limits = 0xFF;
812                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
813                 item.spec = &ipv6;
814                 item.mask = &ipv6_mask;
815                 field = modify_ipv6;
816         }
817         return flow_dv_convert_modify_action(&item, field, NULL, resource,
818                                              MLX5_MODIFICATION_TYPE_SET, error);
819 }
820
821 /**
822  * Convert modify-header decrement TTL action to DV specification.
823  *
824  * @param[in,out] resource
825  *   Pointer to the modify-header resource.
826  * @param[in] action
827  *   Pointer to action specification.
828  * @param[in] items
829  *   Pointer to rte_flow_item objects list.
830  * @param[in] attr
831  *   Pointer to flow attributes structure.
832  * @param[in] dev_flow
833  *   Pointer to the sub flow.
834  * @param[in] tunnel_decap
835  *   Whether action is after tunnel decapsulation.
836  * @param[out] error
837  *   Pointer to the error structure.
838  *
839  * @return
840  *   0 on success, a negative errno value otherwise and rte_errno is set.
841  */
842 static int
843 flow_dv_convert_action_modify_dec_ttl
844                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
845                          const struct rte_flow_item *items,
846                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
847                          bool tunnel_decap, struct rte_flow_error *error)
848 {
849         struct rte_flow_item item;
850         struct rte_flow_item_ipv4 ipv4;
851         struct rte_flow_item_ipv4 ipv4_mask;
852         struct rte_flow_item_ipv6 ipv6;
853         struct rte_flow_item_ipv6 ipv6_mask;
854         struct field_modify_info *field;
855
856         if (!attr->valid)
857                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
858         if (attr->ipv4) {
859                 memset(&ipv4, 0, sizeof(ipv4));
860                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
861                 ipv4.hdr.time_to_live = 0xFF;
862                 ipv4_mask.hdr.time_to_live = 0xFF;
863                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
864                 item.spec = &ipv4;
865                 item.mask = &ipv4_mask;
866                 field = modify_ipv4;
867         } else {
868                 MLX5_ASSERT(attr->ipv6);
869                 memset(&ipv6, 0, sizeof(ipv6));
870                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
871                 ipv6.hdr.hop_limits = 0xFF;
872                 ipv6_mask.hdr.hop_limits = 0xFF;
873                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
874                 item.spec = &ipv6;
875                 item.mask = &ipv6_mask;
876                 field = modify_ipv6;
877         }
878         return flow_dv_convert_modify_action(&item, field, NULL, resource,
879                                              MLX5_MODIFICATION_TYPE_ADD, error);
880 }
881
882 /**
883  * Convert modify-header increment/decrement TCP Sequence number
884  * to DV specification.
885  *
886  * @param[in,out] resource
887  *   Pointer to the modify-header resource.
888  * @param[in] action
889  *   Pointer to action specification.
890  * @param[out] error
891  *   Pointer to the error structure.
892  *
893  * @return
894  *   0 on success, a negative errno value otherwise and rte_errno is set.
895  */
896 static int
897 flow_dv_convert_action_modify_tcp_seq
898                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
899                          const struct rte_flow_action *action,
900                          struct rte_flow_error *error)
901 {
902         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
903         uint64_t value = rte_be_to_cpu_32(*conf);
904         struct rte_flow_item item;
905         struct rte_flow_item_tcp tcp;
906         struct rte_flow_item_tcp tcp_mask;
907
908         memset(&tcp, 0, sizeof(tcp));
909         memset(&tcp_mask, 0, sizeof(tcp_mask));
910         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
911                 /*
912                  * The HW has no decrement operation, only increment operation.
913                  * To simulate decrement X from Y using increment operation
914                  * we need to add UINT32_MAX X times to Y.
915                  * Each adding of UINT32_MAX decrements Y by 1.
916                  */
917                 value *= UINT32_MAX;
918         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
919         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
920         item.type = RTE_FLOW_ITEM_TYPE_TCP;
921         item.spec = &tcp;
922         item.mask = &tcp_mask;
923         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
924                                              MLX5_MODIFICATION_TYPE_ADD, error);
925 }
926
927 /**
928  * Convert modify-header increment/decrement TCP Acknowledgment number
929  * to DV specification.
930  *
931  * @param[in,out] resource
932  *   Pointer to the modify-header resource.
933  * @param[in] action
934  *   Pointer to action specification.
935  * @param[out] error
936  *   Pointer to the error structure.
937  *
938  * @return
939  *   0 on success, a negative errno value otherwise and rte_errno is set.
940  */
941 static int
942 flow_dv_convert_action_modify_tcp_ack
943                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
944                          const struct rte_flow_action *action,
945                          struct rte_flow_error *error)
946 {
947         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
948         uint64_t value = rte_be_to_cpu_32(*conf);
949         struct rte_flow_item item;
950         struct rte_flow_item_tcp tcp;
951         struct rte_flow_item_tcp tcp_mask;
952
953         memset(&tcp, 0, sizeof(tcp));
954         memset(&tcp_mask, 0, sizeof(tcp_mask));
955         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
956                 /*
957                  * The HW has no decrement operation, only increment operation.
958                  * To simulate decrement X from Y using increment operation
959                  * we need to add UINT32_MAX X times to Y.
960                  * Each adding of UINT32_MAX decrements Y by 1.
961                  */
962                 value *= UINT32_MAX;
963         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
964         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
965         item.type = RTE_FLOW_ITEM_TYPE_TCP;
966         item.spec = &tcp;
967         item.mask = &tcp_mask;
968         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
969                                              MLX5_MODIFICATION_TYPE_ADD, error);
970 }
971
/*
 * Map metadata register ids (enum modify_reg) to the matching
 * modify-header field ids used in hardware modification commands.
 * REG_NON deliberately maps to MLX5_MODI_OUT_NONE.
 */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
985
986 /**
987  * Convert register set to DV specification.
988  *
989  * @param[in,out] resource
990  *   Pointer to the modify-header resource.
991  * @param[in] action
992  *   Pointer to action specification.
993  * @param[out] error
994  *   Pointer to the error structure.
995  *
996  * @return
997  *   0 on success, a negative errno value otherwise and rte_errno is set.
998  */
999 static int
1000 flow_dv_convert_action_set_reg
1001                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1002                          const struct rte_flow_action *action,
1003                          struct rte_flow_error *error)
1004 {
1005         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
1006         struct mlx5_modification_cmd *actions = resource->actions;
1007         uint32_t i = resource->actions_num;
1008
1009         if (i >= MLX5_MAX_MODIFY_NUM)
1010                 return rte_flow_error_set(error, EINVAL,
1011                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1012                                           "too many items to modify");
1013         MLX5_ASSERT(conf->id != REG_NON);
1014         MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
1015         actions[i] = (struct mlx5_modification_cmd) {
1016                 .action_type = MLX5_MODIFICATION_TYPE_SET,
1017                 .field = reg_to_field[conf->id],
1018                 .offset = conf->offset,
1019                 .length = conf->length,
1020         };
1021         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1022         actions[i].data1 = rte_cpu_to_be_32(conf->data);
1023         ++i;
1024         resource->actions_num = i;
1025         return 0;
1026 }
1027
1028 /**
1029  * Convert SET_TAG action to DV specification.
1030  *
1031  * @param[in] dev
1032  *   Pointer to the rte_eth_dev structure.
1033  * @param[in,out] resource
1034  *   Pointer to the modify-header resource.
1035  * @param[in] conf
1036  *   Pointer to action specification.
1037  * @param[out] error
1038  *   Pointer to the error structure.
1039  *
1040  * @return
1041  *   0 on success, a negative errno value otherwise and rte_errno is set.
1042  */
1043 static int
1044 flow_dv_convert_action_set_tag
1045                         (struct rte_eth_dev *dev,
1046                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1047                          const struct rte_flow_action_set_tag *conf,
1048                          struct rte_flow_error *error)
1049 {
1050         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1051         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1052         struct rte_flow_item item = {
1053                 .spec = &data,
1054                 .mask = &mask,
1055         };
1056         struct field_modify_info reg_c_x[] = {
1057                 [1] = {0, 0, 0},
1058         };
1059         enum mlx5_modification_field reg_type;
1060         int ret;
1061
1062         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1063         if (ret < 0)
1064                 return ret;
1065         MLX5_ASSERT(ret != REG_NON);
1066         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1067         reg_type = reg_to_field[ret];
1068         MLX5_ASSERT(reg_type > 0);
1069         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1070         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1071                                              MLX5_MODIFICATION_TYPE_SET, error);
1072 }
1073
/**
 * Convert internal COPY_REG action to DV specification.
 *
 * Builds a register-to-register copy command. When reg_c[0] is either
 * source or destination, only the bits reported in dv_regc0_mask are
 * used, since the remaining reg_c[0] bits are reserved for other
 * purposes (see the shared-context mask setup).
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	/* By default copy the full 32-bit register. */
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * Mask is ignoring the endianness, because
			 * there is no conversion in datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			/* Copy from reg_c[0]: mask in register layout. */
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
1149
/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Restrict the mark to the bits the device reports back. */
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		/*
		 * reg_c[0] is shared: only the bits set in dv_regc0_mask
		 * are usable for the mark, so shift value and mask into
		 * that bit range (no endianness conversion in datapath,
		 * hence the double rte_cpu_to_be_32 round-trips).
		 */
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1206
1207 /**
1208  * Get metadata register index for specified steering domain.
1209  *
1210  * @param[in] dev
1211  *   Pointer to the rte_eth_dev structure.
1212  * @param[in] attr
1213  *   Attributes of flow to determine steering domain.
1214  * @param[out] error
1215  *   Pointer to the error structure.
1216  *
1217  * @return
1218  *   positive index on success, a negative errno value otherwise
1219  *   and rte_errno is set.
1220  */
1221 static enum modify_reg
1222 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1223                          const struct rte_flow_attr *attr,
1224                          struct rte_flow_error *error)
1225 {
1226         int reg =
1227                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1228                                           MLX5_METADATA_FDB :
1229                                             attr->egress ?
1230                                             MLX5_METADATA_TX :
1231                                             MLX5_METADATA_RX, 0, error);
1232         if (reg < 0)
1233                 return rte_flow_error_set(error,
1234                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1235                                           NULL, "unavailable "
1236                                           "metadata register");
1237         return reg;
1238 }
1239
/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	/*
	 * In datapath code there is no endianness
	 * conversions for performance reasons, all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		/*
		 * reg_c[0] is shared: shift data/mask into the bit range
		 * reported by dv_regc0_mask. The shift direction depends
		 * on host endianness because the register is matched
		 * without byte swapping in the datapath.
		 */
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1304
1305 /**
1306  * Convert modify-header set IPv4 DSCP action to DV specification.
1307  *
1308  * @param[in,out] resource
1309  *   Pointer to the modify-header resource.
1310  * @param[in] action
1311  *   Pointer to action specification.
1312  * @param[out] error
1313  *   Pointer to the error structure.
1314  *
1315  * @return
1316  *   0 on success, a negative errno value otherwise and rte_errno is set.
1317  */
1318 static int
1319 flow_dv_convert_action_modify_ipv4_dscp
1320                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1321                          const struct rte_flow_action *action,
1322                          struct rte_flow_error *error)
1323 {
1324         const struct rte_flow_action_set_dscp *conf =
1325                 (const struct rte_flow_action_set_dscp *)(action->conf);
1326         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1327         struct rte_flow_item_ipv4 ipv4;
1328         struct rte_flow_item_ipv4 ipv4_mask;
1329
1330         memset(&ipv4, 0, sizeof(ipv4));
1331         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1332         ipv4.hdr.type_of_service = conf->dscp;
1333         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1334         item.spec = &ipv4;
1335         item.mask = &ipv4_mask;
1336         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1337                                              MLX5_MODIFICATION_TYPE_SET, error);
1338 }
1339
1340 /**
1341  * Convert modify-header set IPv6 DSCP action to DV specification.
1342  *
1343  * @param[in,out] resource
1344  *   Pointer to the modify-header resource.
1345  * @param[in] action
1346  *   Pointer to action specification.
1347  * @param[out] error
1348  *   Pointer to the error structure.
1349  *
1350  * @return
1351  *   0 on success, a negative errno value otherwise and rte_errno is set.
1352  */
1353 static int
1354 flow_dv_convert_action_modify_ipv6_dscp
1355                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1356                          const struct rte_flow_action *action,
1357                          struct rte_flow_error *error)
1358 {
1359         const struct rte_flow_action_set_dscp *conf =
1360                 (const struct rte_flow_action_set_dscp *)(action->conf);
1361         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1362         struct rte_flow_item_ipv6 ipv6;
1363         struct rte_flow_item_ipv6 ipv6_mask;
1364
1365         memset(&ipv6, 0, sizeof(ipv6));
1366         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1367         /*
1368          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1369          * rdma-core only accept the DSCP bits byte aligned start from
1370          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1371          * bits in IPv6 case as rdma-core requires byte aligned value.
1372          */
1373         ipv6.hdr.vtc_flow = conf->dscp;
1374         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1375         item.spec = &ipv6;
1376         item.mask = &ipv6_mask;
1377         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1378                                              MLX5_MODIFICATION_TYPE_SET, error);
1379 }
1380
1381 static int
1382 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1383                            enum rte_flow_field_id field)
1384 {
1385         switch (field) {
1386         case RTE_FLOW_FIELD_START:
1387                 return 32;
1388         case RTE_FLOW_FIELD_MAC_DST:
1389         case RTE_FLOW_FIELD_MAC_SRC:
1390                 return 48;
1391         case RTE_FLOW_FIELD_VLAN_TYPE:
1392                 return 16;
1393         case RTE_FLOW_FIELD_VLAN_ID:
1394                 return 12;
1395         case RTE_FLOW_FIELD_MAC_TYPE:
1396                 return 16;
1397         case RTE_FLOW_FIELD_IPV4_DSCP:
1398                 return 6;
1399         case RTE_FLOW_FIELD_IPV4_TTL:
1400                 return 8;
1401         case RTE_FLOW_FIELD_IPV4_SRC:
1402         case RTE_FLOW_FIELD_IPV4_DST:
1403                 return 32;
1404         case RTE_FLOW_FIELD_IPV6_DSCP:
1405                 return 6;
1406         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1407                 return 8;
1408         case RTE_FLOW_FIELD_IPV6_SRC:
1409         case RTE_FLOW_FIELD_IPV6_DST:
1410                 return 128;
1411         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1412         case RTE_FLOW_FIELD_TCP_PORT_DST:
1413                 return 16;
1414         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1415         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1416                 return 32;
1417         case RTE_FLOW_FIELD_TCP_FLAGS:
1418                 return 9;
1419         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1420         case RTE_FLOW_FIELD_UDP_PORT_DST:
1421                 return 16;
1422         case RTE_FLOW_FIELD_VXLAN_VNI:
1423         case RTE_FLOW_FIELD_GENEVE_VNI:
1424                 return 24;
1425         case RTE_FLOW_FIELD_GTP_TEID:
1426         case RTE_FLOW_FIELD_TAG:
1427                 return 32;
1428         case RTE_FLOW_FIELD_MARK:
1429                 return 24;
1430         case RTE_FLOW_FIELD_META:
1431                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1432                         return 16;
1433                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1434                         return 32;
1435                 else
1436                         return 0;
1437         case RTE_FLOW_FIELD_POINTER:
1438         case RTE_FLOW_FIELD_VALUE:
1439                 return 64;
1440         default:
1441                 MLX5_ASSERT(false);
1442         }
1443         return 0;
1444 }
1445
1446 static void
1447 mlx5_flow_field_id_to_modify_info
1448                 (const struct rte_flow_action_modify_data *data,
1449                  struct field_modify_info *info,
1450                  uint32_t *mask, uint32_t *value,
1451                  uint32_t width, uint32_t dst_width,
1452                  struct rte_eth_dev *dev,
1453                  const struct rte_flow_attr *attr,
1454                  struct rte_flow_error *error)
1455 {
1456         struct mlx5_priv *priv = dev->data->dev_private;
1457         struct mlx5_dev_config *config = &priv->config;
1458         uint32_t idx = 0;
1459         uint32_t off = 0;
1460         uint64_t val = 0;
1461         switch (data->field) {
1462         case RTE_FLOW_FIELD_START:
1463                 /* not supported yet */
1464                 MLX5_ASSERT(false);
1465                 break;
1466         case RTE_FLOW_FIELD_MAC_DST:
1467                 off = data->offset > 16 ? data->offset - 16 : 0;
1468                 if (mask) {
1469                         if (data->offset < 16) {
1470                                 info[idx] = (struct field_modify_info){2, 0,
1471                                                 MLX5_MODI_OUT_DMAC_15_0};
1472                                 if (width < 16) {
1473                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1474                                                                  (16 - width));
1475                                         width = 0;
1476                                 } else {
1477                                         mask[idx] = RTE_BE16(0xffff);
1478                                         width -= 16;
1479                                 }
1480                                 if (!width)
1481                                         break;
1482                                 ++idx;
1483                         }
1484                         info[idx] = (struct field_modify_info){4, 4 * idx,
1485                                                 MLX5_MODI_OUT_DMAC_47_16};
1486                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1487                                                       (32 - width)) << off);
1488                 } else {
1489                         if (data->offset < 16)
1490                                 info[idx++] = (struct field_modify_info){2, 0,
1491                                                 MLX5_MODI_OUT_DMAC_15_0};
1492                         info[idx] = (struct field_modify_info){4, off,
1493                                                 MLX5_MODI_OUT_DMAC_47_16};
1494                 }
1495                 break;
1496         case RTE_FLOW_FIELD_MAC_SRC:
1497                 off = data->offset > 16 ? data->offset - 16 : 0;
1498                 if (mask) {
1499                         if (data->offset < 16) {
1500                                 info[idx] = (struct field_modify_info){2, 0,
1501                                                 MLX5_MODI_OUT_SMAC_15_0};
1502                                 if (width < 16) {
1503                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1504                                                                  (16 - width));
1505                                         width = 0;
1506                                 } else {
1507                                         mask[idx] = RTE_BE16(0xffff);
1508                                         width -= 16;
1509                                 }
1510                                 if (!width)
1511                                         break;
1512                                 ++idx;
1513                         }
1514                         info[idx] = (struct field_modify_info){4, 4 * idx,
1515                                                 MLX5_MODI_OUT_SMAC_47_16};
1516                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1517                                                       (32 - width)) << off);
1518                 } else {
1519                         if (data->offset < 16)
1520                                 info[idx++] = (struct field_modify_info){2, 0,
1521                                                 MLX5_MODI_OUT_SMAC_15_0};
1522                         info[idx] = (struct field_modify_info){4, off,
1523                                                 MLX5_MODI_OUT_SMAC_47_16};
1524                 }
1525                 break;
1526         case RTE_FLOW_FIELD_VLAN_TYPE:
1527                 /* not supported yet */
1528                 break;
1529         case RTE_FLOW_FIELD_VLAN_ID:
1530                 info[idx] = (struct field_modify_info){2, 0,
1531                                         MLX5_MODI_OUT_FIRST_VID};
1532                 if (mask)
1533                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1534                 break;
1535         case RTE_FLOW_FIELD_MAC_TYPE:
1536                 info[idx] = (struct field_modify_info){2, 0,
1537                                         MLX5_MODI_OUT_ETHERTYPE};
1538                 if (mask)
1539                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1540                 break;
1541         case RTE_FLOW_FIELD_IPV4_DSCP:
1542                 info[idx] = (struct field_modify_info){1, 0,
1543                                         MLX5_MODI_OUT_IP_DSCP};
1544                 if (mask)
1545                         mask[idx] = 0x3f >> (6 - width);
1546                 break;
1547         case RTE_FLOW_FIELD_IPV4_TTL:
1548                 info[idx] = (struct field_modify_info){1, 0,
1549                                         MLX5_MODI_OUT_IPV4_TTL};
1550                 if (mask)
1551                         mask[idx] = 0xff >> (8 - width);
1552                 break;
1553         case RTE_FLOW_FIELD_IPV4_SRC:
1554                 info[idx] = (struct field_modify_info){4, 0,
1555                                         MLX5_MODI_OUT_SIPV4};
1556                 if (mask)
1557                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1558                                                      (32 - width));
1559                 break;
1560         case RTE_FLOW_FIELD_IPV4_DST:
1561                 info[idx] = (struct field_modify_info){4, 0,
1562                                         MLX5_MODI_OUT_DIPV4};
1563                 if (mask)
1564                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1565                                                      (32 - width));
1566                 break;
1567         case RTE_FLOW_FIELD_IPV6_DSCP:
1568                 info[idx] = (struct field_modify_info){1, 0,
1569                                         MLX5_MODI_OUT_IP_DSCP};
1570                 if (mask)
1571                         mask[idx] = 0x3f >> (6 - width);
1572                 break;
1573         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1574                 info[idx] = (struct field_modify_info){1, 0,
1575                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1576                 if (mask)
1577                         mask[idx] = 0xff >> (8 - width);
1578                 break;
1579         case RTE_FLOW_FIELD_IPV6_SRC:
1580                 if (mask) {
1581                         if (data->offset < 32) {
1582                                 info[idx] = (struct field_modify_info){4,
1583                                                 4 * idx,
1584                                                 MLX5_MODI_OUT_SIPV6_31_0};
1585                                 if (width < 32) {
1586                                         mask[idx] =
1587                                                 rte_cpu_to_be_32(0xffffffff >>
1588                                                                  (32 - width));
1589                                         width = 0;
1590                                 } else {
1591                                         mask[idx] = RTE_BE32(0xffffffff);
1592                                         width -= 32;
1593                                 }
1594                                 if (!width)
1595                                         break;
1596                                 ++idx;
1597                         }
1598                         if (data->offset < 64) {
1599                                 info[idx] = (struct field_modify_info){4,
1600                                                 4 * idx,
1601                                                 MLX5_MODI_OUT_SIPV6_63_32};
1602                                 if (width < 32) {
1603                                         mask[idx] =
1604                                                 rte_cpu_to_be_32(0xffffffff >>
1605                                                                  (32 - width));
1606                                         width = 0;
1607                                 } else {
1608                                         mask[idx] = RTE_BE32(0xffffffff);
1609                                         width -= 32;
1610                                 }
1611                                 if (!width)
1612                                         break;
1613                                 ++idx;
1614                         }
1615                         if (data->offset < 96) {
1616                                 info[idx] = (struct field_modify_info){4,
1617                                                 4 * idx,
1618                                                 MLX5_MODI_OUT_SIPV6_95_64};
1619                                 if (width < 32) {
1620                                         mask[idx] =
1621                                                 rte_cpu_to_be_32(0xffffffff >>
1622                                                                  (32 - width));
1623                                         width = 0;
1624                                 } else {
1625                                         mask[idx] = RTE_BE32(0xffffffff);
1626                                         width -= 32;
1627                                 }
1628                                 if (!width)
1629                                         break;
1630                                 ++idx;
1631                         }
1632                         info[idx] = (struct field_modify_info){4, 4 * idx,
1633                                                 MLX5_MODI_OUT_SIPV6_127_96};
1634                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1635                                                      (32 - width));
1636                 } else {
1637                         if (data->offset < 32)
1638                                 info[idx++] = (struct field_modify_info){4, 0,
1639                                                 MLX5_MODI_OUT_SIPV6_31_0};
1640                         if (data->offset < 64)
1641                                 info[idx++] = (struct field_modify_info){4, 0,
1642                                                 MLX5_MODI_OUT_SIPV6_63_32};
1643                         if (data->offset < 96)
1644                                 info[idx++] = (struct field_modify_info){4, 0,
1645                                                 MLX5_MODI_OUT_SIPV6_95_64};
1646                         if (data->offset < 128)
1647                                 info[idx++] = (struct field_modify_info){4, 0,
1648                                                 MLX5_MODI_OUT_SIPV6_127_96};
1649                 }
1650                 break;
1651         case RTE_FLOW_FIELD_IPV6_DST:
1652                 if (mask) {
1653                         if (data->offset < 32) {
1654                                 info[idx] = (struct field_modify_info){4,
1655                                                 4 * idx,
1656                                                 MLX5_MODI_OUT_DIPV6_31_0};
1657                                 if (width < 32) {
1658                                         mask[idx] =
1659                                                 rte_cpu_to_be_32(0xffffffff >>
1660                                                                  (32 - width));
1661                                         width = 0;
1662                                 } else {
1663                                         mask[idx] = RTE_BE32(0xffffffff);
1664                                         width -= 32;
1665                                 }
1666                                 if (!width)
1667                                         break;
1668                                 ++idx;
1669                         }
1670                         if (data->offset < 64) {
1671                                 info[idx] = (struct field_modify_info){4,
1672                                                 4 * idx,
1673                                                 MLX5_MODI_OUT_DIPV6_63_32};
1674                                 if (width < 32) {
1675                                         mask[idx] =
1676                                                 rte_cpu_to_be_32(0xffffffff >>
1677                                                                  (32 - width));
1678                                         width = 0;
1679                                 } else {
1680                                         mask[idx] = RTE_BE32(0xffffffff);
1681                                         width -= 32;
1682                                 }
1683                                 if (!width)
1684                                         break;
1685                                 ++idx;
1686                         }
1687                         if (data->offset < 96) {
1688                                 info[idx] = (struct field_modify_info){4,
1689                                                 4 * idx,
1690                                                 MLX5_MODI_OUT_DIPV6_95_64};
1691                                 if (width < 32) {
1692                                         mask[idx] =
1693                                                 rte_cpu_to_be_32(0xffffffff >>
1694                                                                  (32 - width));
1695                                         width = 0;
1696                                 } else {
1697                                         mask[idx] = RTE_BE32(0xffffffff);
1698                                         width -= 32;
1699                                 }
1700                                 if (!width)
1701                                         break;
1702                                 ++idx;
1703                         }
1704                         info[idx] = (struct field_modify_info){4, 4 * idx,
1705                                                 MLX5_MODI_OUT_DIPV6_127_96};
1706                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1707                                                      (32 - width));
1708                 } else {
1709                         if (data->offset < 32)
1710                                 info[idx++] = (struct field_modify_info){4, 0,
1711                                                 MLX5_MODI_OUT_DIPV6_31_0};
1712                         if (data->offset < 64)
1713                                 info[idx++] = (struct field_modify_info){4, 0,
1714                                                 MLX5_MODI_OUT_DIPV6_63_32};
1715                         if (data->offset < 96)
1716                                 info[idx++] = (struct field_modify_info){4, 0,
1717                                                 MLX5_MODI_OUT_DIPV6_95_64};
1718                         if (data->offset < 128)
1719                                 info[idx++] = (struct field_modify_info){4, 0,
1720                                                 MLX5_MODI_OUT_DIPV6_127_96};
1721                 }
1722                 break;
1723         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1724                 info[idx] = (struct field_modify_info){2, 0,
1725                                         MLX5_MODI_OUT_TCP_SPORT};
1726                 if (mask)
1727                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1728                 break;
1729         case RTE_FLOW_FIELD_TCP_PORT_DST:
1730                 info[idx] = (struct field_modify_info){2, 0,
1731                                         MLX5_MODI_OUT_TCP_DPORT};
1732                 if (mask)
1733                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1734                 break;
1735         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1736                 info[idx] = (struct field_modify_info){4, 0,
1737                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1738                 if (mask)
1739                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1740                                                      (32 - width));
1741                 break;
1742         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1743                 info[idx] = (struct field_modify_info){4, 0,
1744                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1745                 if (mask)
1746                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1747                                                      (32 - width));
1748                 break;
1749         case RTE_FLOW_FIELD_TCP_FLAGS:
1750                 info[idx] = (struct field_modify_info){2, 0,
1751                                         MLX5_MODI_OUT_TCP_FLAGS};
1752                 if (mask)
1753                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1754                 break;
1755         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1756                 info[idx] = (struct field_modify_info){2, 0,
1757                                         MLX5_MODI_OUT_UDP_SPORT};
1758                 if (mask)
1759                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1760                 break;
1761         case RTE_FLOW_FIELD_UDP_PORT_DST:
1762                 info[idx] = (struct field_modify_info){2, 0,
1763                                         MLX5_MODI_OUT_UDP_DPORT};
1764                 if (mask)
1765                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1766                 break;
1767         case RTE_FLOW_FIELD_VXLAN_VNI:
1768                 /* not supported yet */
1769                 break;
1770         case RTE_FLOW_FIELD_GENEVE_VNI:
1771                 /* not supported yet*/
1772                 break;
1773         case RTE_FLOW_FIELD_GTP_TEID:
1774                 info[idx] = (struct field_modify_info){4, 0,
1775                                         MLX5_MODI_GTP_TEID};
1776                 if (mask)
1777                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1778                                                      (32 - width));
1779                 break;
1780         case RTE_FLOW_FIELD_TAG:
1781                 {
1782                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1783                                                    data->level, error);
1784                         if (reg < 0)
1785                                 return;
1786                         MLX5_ASSERT(reg != REG_NON);
1787                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1788                         info[idx] = (struct field_modify_info){4, 0,
1789                                                 reg_to_field[reg]};
1790                         if (mask)
1791                                 mask[idx] =
1792                                         rte_cpu_to_be_32(0xffffffff >>
1793                                                          (32 - width));
1794                 }
1795                 break;
1796         case RTE_FLOW_FIELD_MARK:
1797                 {
1798                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1799                                                        0, error);
1800                         if (reg < 0)
1801                                 return;
1802                         MLX5_ASSERT(reg != REG_NON);
1803                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1804                         info[idx] = (struct field_modify_info){4, 0,
1805                                                 reg_to_field[reg]};
1806                         if (mask)
1807                                 mask[idx] =
1808                                         rte_cpu_to_be_32(0xffffffff >>
1809                                                          (32 - width));
1810                 }
1811                 break;
1812         case RTE_FLOW_FIELD_META:
1813                 {
1814                         unsigned int xmeta = config->dv_xmeta_en;
1815                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1816                         if (reg < 0)
1817                                 return;
1818                         MLX5_ASSERT(reg != REG_NON);
1819                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1820                         if (xmeta == MLX5_XMETA_MODE_META16) {
1821                                 info[idx] = (struct field_modify_info){2, 0,
1822                                                         reg_to_field[reg]};
1823                                 if (mask)
1824                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1825                                                                 (16 - width));
1826                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1827                                 info[idx] = (struct field_modify_info){4, 0,
1828                                                         reg_to_field[reg]};
1829                                 if (mask)
1830                                         mask[idx] =
1831                                                 rte_cpu_to_be_32(0xffffffff >>
1832                                                                 (32 - width));
1833                         } else {
1834                                 MLX5_ASSERT(false);
1835                         }
1836                 }
1837                 break;
1838         case RTE_FLOW_FIELD_POINTER:
1839         case RTE_FLOW_FIELD_VALUE:
1840                 if (data->field == RTE_FLOW_FIELD_POINTER)
1841                         memcpy(&val, (void *)(uintptr_t)data->value,
1842                                sizeof(uint64_t));
1843                 else
1844                         val = data->value;
1845                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1846                         if (mask[idx]) {
1847                                 if (dst_width == 48) {
1848                                         /*special case for MAC addresses */
1849                                         value[idx] = rte_cpu_to_be_16(val);
1850                                         val >>= 16;
1851                                         dst_width -= 16;
1852                                 } else if (dst_width > 16) {
1853                                         value[idx] = rte_cpu_to_be_32(val);
1854                                         val >>= 32;
1855                                 } else if (dst_width > 8) {
1856                                         value[idx] = rte_cpu_to_be_16(val);
1857                                         val >>= 16;
1858                                 } else {
1859                                         value[idx] = (uint8_t)val;
1860                                         val >>= 8;
1861                                 }
1862                                 if (!val)
1863                                         break;
1864                         }
1865                 }
1866                 break;
1867         default:
1868                 MLX5_ASSERT(false);
1869                 break;
1870         }
1871 }
1872
1873 /**
1874  * Convert modify_field action to DV specification.
1875  *
1876  * @param[in] dev
1877  *   Pointer to the rte_eth_dev structure.
1878  * @param[in,out] resource
1879  *   Pointer to the modify-header resource.
1880  * @param[in] action
1881  *   Pointer to action specification.
1882  * @param[in] attr
1883  *   Attributes of flow that includes this item.
1884  * @param[out] error
1885  *   Pointer to the error structure.
1886  *
1887  * @return
1888  *   0 on success, a negative errno value otherwise and rte_errno is set.
1889  */
1890 static int
1891 flow_dv_convert_action_modify_field
1892                         (struct rte_eth_dev *dev,
1893                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1894                          const struct rte_flow_action *action,
1895                          const struct rte_flow_attr *attr,
1896                          struct rte_flow_error *error)
1897 {
1898         struct mlx5_priv *priv = dev->data->dev_private;
1899         struct mlx5_dev_config *config = &priv->config;
1900         const struct rte_flow_action_modify_field *conf =
1901                 (const struct rte_flow_action_modify_field *)(action->conf);
1902         struct rte_flow_item item;
1903         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1904                                                                 {0, 0, 0} };
1905         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1906                                                                 {0, 0, 0} };
1907         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1908         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1909         uint32_t type;
1910         uint32_t dst_width = mlx5_flow_item_field_width(config,
1911                                                         conf->dst.field);
1912
1913         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1914                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1915                 type = MLX5_MODIFICATION_TYPE_SET;
1916                 /** For SET fill the destination field (field) first. */
1917                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1918                         value, conf->width, dst_width, dev, attr, error);
1919                 /** Then copy immediate value from source as per mask. */
1920                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1921                         value, conf->width, dst_width, dev, attr, error);
1922                 item.spec = &value;
1923         } else {
1924                 type = MLX5_MODIFICATION_TYPE_COPY;
1925                 /** For COPY fill the destination field (dcopy) without mask. */
1926                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1927                         value, conf->width, dst_width, dev, attr, error);
1928                 /** Then construct the source field (field) with mask. */
1929                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1930                         value, conf->width, dst_width, dev, attr, error);
1931         }
1932         item.mask = &mask;
1933         return flow_dv_convert_modify_action(&item,
1934                         field, dcopy, resource, type, error);
1935 }
1936
1937 /**
1938  * Validate MARK item.
1939  *
1940  * @param[in] dev
1941  *   Pointer to the rte_eth_dev structure.
1942  * @param[in] item
1943  *   Item specification.
1944  * @param[in] attr
1945  *   Attributes of flow that includes this item.
1946  * @param[out] error
1947  *   Pointer to error structure.
1948  *
1949  * @return
1950  *   0 on success, a negative errno value otherwise and rte_errno is set.
1951  */
1952 static int
1953 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1954                            const struct rte_flow_item *item,
1955                            const struct rte_flow_attr *attr __rte_unused,
1956                            struct rte_flow_error *error)
1957 {
1958         struct mlx5_priv *priv = dev->data->dev_private;
1959         struct mlx5_dev_config *config = &priv->config;
1960         const struct rte_flow_item_mark *spec = item->spec;
1961         const struct rte_flow_item_mark *mask = item->mask;
1962         const struct rte_flow_item_mark nic_mask = {
1963                 .id = priv->sh->dv_mark_mask,
1964         };
1965         int ret;
1966
1967         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1968                 return rte_flow_error_set(error, ENOTSUP,
1969                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1970                                           "extended metadata feature"
1971                                           " isn't enabled");
1972         if (!mlx5_flow_ext_mreg_supported(dev))
1973                 return rte_flow_error_set(error, ENOTSUP,
1974                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1975                                           "extended metadata register"
1976                                           " isn't supported");
1977         if (!nic_mask.id)
1978                 return rte_flow_error_set(error, ENOTSUP,
1979                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1980                                           "extended metadata register"
1981                                           " isn't available");
1982         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1983         if (ret < 0)
1984                 return ret;
1985         if (!spec)
1986                 return rte_flow_error_set(error, EINVAL,
1987                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1988                                           item->spec,
1989                                           "data cannot be empty");
1990         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1991                 return rte_flow_error_set(error, EINVAL,
1992                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1993                                           &spec->id,
1994                                           "mark id exceeds the limit");
1995         if (!mask)
1996                 mask = &nic_mask;
1997         if (!mask->id)
1998                 return rte_flow_error_set(error, EINVAL,
1999                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2000                                         "mask cannot be zero");
2001
2002         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2003                                         (const uint8_t *)&nic_mask,
2004                                         sizeof(struct rte_flow_item_mark),
2005                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2006         if (ret < 0)
2007                 return ret;
2008         return 0;
2009 }
2010
2011 /**
2012  * Validate META item.
2013  *
2014  * @param[in] dev
2015  *   Pointer to the rte_eth_dev structure.
2016  * @param[in] item
2017  *   Item specification.
2018  * @param[in] attr
2019  *   Attributes of flow that includes this item.
2020  * @param[out] error
2021  *   Pointer to error structure.
2022  *
2023  * @return
2024  *   0 on success, a negative errno value otherwise and rte_errno is set.
2025  */
2026 static int
2027 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
2028                            const struct rte_flow_item *item,
2029                            const struct rte_flow_attr *attr,
2030                            struct rte_flow_error *error)
2031 {
2032         struct mlx5_priv *priv = dev->data->dev_private;
2033         struct mlx5_dev_config *config = &priv->config;
2034         const struct rte_flow_item_meta *spec = item->spec;
2035         const struct rte_flow_item_meta *mask = item->mask;
2036         struct rte_flow_item_meta nic_mask = {
2037                 .data = UINT32_MAX
2038         };
2039         int reg;
2040         int ret;
2041
2042         if (!spec)
2043                 return rte_flow_error_set(error, EINVAL,
2044                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2045                                           item->spec,
2046                                           "data cannot be empty");
2047         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2048                 if (!mlx5_flow_ext_mreg_supported(dev))
2049                         return rte_flow_error_set(error, ENOTSUP,
2050                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2051                                           "extended metadata register"
2052                                           " isn't supported");
2053                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2054                 if (reg < 0)
2055                         return reg;
2056                 if (reg == REG_NON)
2057                         return rte_flow_error_set(error, ENOTSUP,
2058                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2059                                         "unavalable extended metadata register");
2060                 if (reg == REG_B)
2061                         return rte_flow_error_set(error, ENOTSUP,
2062                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2063                                           "match on reg_b "
2064                                           "isn't supported");
2065                 if (reg != REG_A)
2066                         nic_mask.data = priv->sh->dv_meta_mask;
2067         } else {
2068                 if (attr->transfer)
2069                         return rte_flow_error_set(error, ENOTSUP,
2070                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2071                                         "extended metadata feature "
2072                                         "should be enabled when "
2073                                         "meta item is requested "
2074                                         "with e-switch mode ");
2075                 if (attr->ingress)
2076                         return rte_flow_error_set(error, ENOTSUP,
2077                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2078                                         "match on metadata for ingress "
2079                                         "is not supported in legacy "
2080                                         "metadata mode");
2081         }
2082         if (!mask)
2083                 mask = &rte_flow_item_meta_mask;
2084         if (!mask->data)
2085                 return rte_flow_error_set(error, EINVAL,
2086                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2087                                         "mask cannot be zero");
2088
2089         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2090                                         (const uint8_t *)&nic_mask,
2091                                         sizeof(struct rte_flow_item_meta),
2092                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2093         return ret;
2094 }
2095
2096 /**
2097  * Validate TAG item.
2098  *
2099  * @param[in] dev
2100  *   Pointer to the rte_eth_dev structure.
2101  * @param[in] item
2102  *   Item specification.
2103  * @param[in] attr
2104  *   Attributes of flow that includes this item.
2105  * @param[out] error
2106  *   Pointer to error structure.
2107  *
2108  * @return
2109  *   0 on success, a negative errno value otherwise and rte_errno is set.
2110  */
2111 static int
2112 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2113                           const struct rte_flow_item *item,
2114                           const struct rte_flow_attr *attr __rte_unused,
2115                           struct rte_flow_error *error)
2116 {
2117         const struct rte_flow_item_tag *spec = item->spec;
2118         const struct rte_flow_item_tag *mask = item->mask;
2119         const struct rte_flow_item_tag nic_mask = {
2120                 .data = RTE_BE32(UINT32_MAX),
2121                 .index = 0xff,
2122         };
2123         int ret;
2124
2125         if (!mlx5_flow_ext_mreg_supported(dev))
2126                 return rte_flow_error_set(error, ENOTSUP,
2127                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2128                                           "extensive metadata register"
2129                                           " isn't supported");
2130         if (!spec)
2131                 return rte_flow_error_set(error, EINVAL,
2132                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2133                                           item->spec,
2134                                           "data cannot be empty");
2135         if (!mask)
2136                 mask = &rte_flow_item_tag_mask;
2137         if (!mask->data)
2138                 return rte_flow_error_set(error, EINVAL,
2139                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2140                                         "mask cannot be zero");
2141
2142         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2143                                         (const uint8_t *)&nic_mask,
2144                                         sizeof(struct rte_flow_item_tag),
2145                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2146         if (ret < 0)
2147                 return ret;
2148         if (mask->index != 0xff)
2149                 return rte_flow_error_set(error, EINVAL,
2150                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2151                                           "partial mask for tag index"
2152                                           " is not supported");
2153         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2154         if (ret < 0)
2155                 return ret;
2156         MLX5_ASSERT(ret != REG_NON);
2157         return 0;
2158 }
2159
2160 /**
2161  * Validate vport item.
2162  *
2163  * @param[in] dev
2164  *   Pointer to the rte_eth_dev structure.
2165  * @param[in] item
2166  *   Item specification.
2167  * @param[in] attr
2168  *   Attributes of flow that includes this item.
2169  * @param[in] item_flags
2170  *   Bit-fields that holds the items detected until now.
2171  * @param[out] error
2172  *   Pointer to error structure.
2173  *
2174  * @return
2175  *   0 on success, a negative errno value otherwise and rte_errno is set.
2176  */
2177 static int
2178 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2179                               const struct rte_flow_item *item,
2180                               const struct rte_flow_attr *attr,
2181                               uint64_t item_flags,
2182                               struct rte_flow_error *error)
2183 {
2184         const struct rte_flow_item_port_id *spec = item->spec;
2185         const struct rte_flow_item_port_id *mask = item->mask;
2186         const struct rte_flow_item_port_id switch_mask = {
2187                         .id = 0xffffffff,
2188         };
2189         struct mlx5_priv *esw_priv;
2190         struct mlx5_priv *dev_priv;
2191         int ret;
2192
2193         if (!attr->transfer)
2194                 return rte_flow_error_set(error, EINVAL,
2195                                           RTE_FLOW_ERROR_TYPE_ITEM,
2196                                           NULL,
2197                                           "match on port id is valid only"
2198                                           " when transfer flag is enabled");
2199         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2200                 return rte_flow_error_set(error, ENOTSUP,
2201                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2202                                           "multiple source ports are not"
2203                                           " supported");
2204         if (!mask)
2205                 mask = &switch_mask;
2206         if (mask->id != 0xffffffff)
2207                 return rte_flow_error_set(error, ENOTSUP,
2208                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2209                                            mask,
2210                                            "no support for partial mask on"
2211                                            " \"id\" field");
2212         ret = mlx5_flow_item_acceptable
2213                                 (item, (const uint8_t *)mask,
2214                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2215                                  sizeof(struct rte_flow_item_port_id),
2216                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2217         if (ret)
2218                 return ret;
2219         if (!spec)
2220                 return 0;
2221         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2222         if (!esw_priv)
2223                 return rte_flow_error_set(error, rte_errno,
2224                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2225                                           "failed to obtain E-Switch info for"
2226                                           " port");
2227         dev_priv = mlx5_dev_to_eswitch_info(dev);
2228         if (!dev_priv)
2229                 return rte_flow_error_set(error, rte_errno,
2230                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2231                                           NULL,
2232                                           "failed to obtain E-Switch info");
2233         if (esw_priv->domain_id != dev_priv->domain_id)
2234                 return rte_flow_error_set(error, EINVAL,
2235                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2236                                           "cannot match on a port from a"
2237                                           " different E-Switch");
2238         return 0;
2239 }
2240
2241 /**
2242  * Validate VLAN item.
2243  *
2244  * @param[in] item
2245  *   Item specification.
2246  * @param[in] item_flags
2247  *   Bit-fields that holds the items detected until now.
2248  * @param[in] dev
2249  *   Ethernet device flow is being created on.
2250  * @param[out] error
2251  *   Pointer to error structure.
2252  *
2253  * @return
2254  *   0 on success, a negative errno value otherwise and rte_errno is set.
2255  */
2256 static int
2257 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2258                            uint64_t item_flags,
2259                            struct rte_eth_dev *dev,
2260                            struct rte_flow_error *error)
2261 {
2262         const struct rte_flow_item_vlan *mask = item->mask;
2263         const struct rte_flow_item_vlan nic_mask = {
2264                 .tci = RTE_BE16(UINT16_MAX),
2265                 .inner_type = RTE_BE16(UINT16_MAX),
2266                 .has_more_vlan = 1,
2267         };
2268         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2269         int ret;
2270         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2271                                         MLX5_FLOW_LAYER_INNER_L4) :
2272                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2273                                         MLX5_FLOW_LAYER_OUTER_L4);
2274         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2275                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2276
2277         if (item_flags & vlanm)
2278                 return rte_flow_error_set(error, EINVAL,
2279                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2280                                           "multiple VLAN layers not supported");
2281         else if ((item_flags & l34m) != 0)
2282                 return rte_flow_error_set(error, EINVAL,
2283                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2284                                           "VLAN cannot follow L3/L4 layer");
2285         if (!mask)
2286                 mask = &rte_flow_item_vlan_mask;
2287         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2288                                         (const uint8_t *)&nic_mask,
2289                                         sizeof(struct rte_flow_item_vlan),
2290                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2291         if (ret)
2292                 return ret;
2293         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2294                 struct mlx5_priv *priv = dev->data->dev_private;
2295
2296                 if (priv->vmwa_context) {
2297                         /*
2298                          * Non-NULL context means we have a virtual machine
2299                          * and SR-IOV enabled, we have to create VLAN interface
2300                          * to make hypervisor to setup E-Switch vport
2301                          * context correctly. We avoid creating the multiple
2302                          * VLAN interfaces, so we cannot support VLAN tag mask.
2303                          */
2304                         return rte_flow_error_set(error, EINVAL,
2305                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2306                                                   item,
2307                                                   "VLAN tag mask is not"
2308                                                   " supported in virtual"
2309                                                   " environment");
2310                 }
2311         }
2312         return 0;
2313 }
2314
2315 /*
2316  * GTP flags are contained in 1 byte of the format:
2317  * -------------------------------------------
2318  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2319  * |-----------------------------------------|
2320  * | value | Version | PT | Res | E | S | PN |
2321  * -------------------------------------------
2322  *
2323  * Matching is supported only for GTP flags E, S, PN.
2324  */
2325 #define MLX5_GTP_FLAGS_MASK     0x07
2326
2327 /**
2328  * Validate GTP item.
2329  *
2330  * @param[in] dev
2331  *   Pointer to the rte_eth_dev structure.
2332  * @param[in] item
2333  *   Item specification.
2334  * @param[in] item_flags
2335  *   Bit-fields that holds the items detected until now.
2336  * @param[out] error
2337  *   Pointer to error structure.
2338  *
2339  * @return
2340  *   0 on success, a negative errno value otherwise and rte_errno is set.
2341  */
2342 static int
2343 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2344                           const struct rte_flow_item *item,
2345                           uint64_t item_flags,
2346                           struct rte_flow_error *error)
2347 {
2348         struct mlx5_priv *priv = dev->data->dev_private;
2349         const struct rte_flow_item_gtp *spec = item->spec;
2350         const struct rte_flow_item_gtp *mask = item->mask;
2351         const struct rte_flow_item_gtp nic_mask = {
2352                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2353                 .msg_type = 0xff,
2354                 .teid = RTE_BE32(0xffffffff),
2355         };
2356
2357         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2358                 return rte_flow_error_set(error, ENOTSUP,
2359                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2360                                           "GTP support is not enabled");
2361         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2362                 return rte_flow_error_set(error, ENOTSUP,
2363                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                                           "multiple tunnel layers not"
2365                                           " supported");
2366         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2367                 return rte_flow_error_set(error, EINVAL,
2368                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2369                                           "no outer UDP layer found");
2370         if (!mask)
2371                 mask = &rte_flow_item_gtp_mask;
2372         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2373                 return rte_flow_error_set(error, ENOTSUP,
2374                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2375                                           "Match is supported for GTP"
2376                                           " flags only");
2377         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2378                                          (const uint8_t *)&nic_mask,
2379                                          sizeof(struct rte_flow_item_gtp),
2380                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2381 }
2382
2383 /**
2384  * Validate GTP PSC item.
2385  *
2386  * @param[in] item
2387  *   Item specification.
2388  * @param[in] last_item
2389  *   Previous validated item in the pattern items.
2390  * @param[in] gtp_item
2391  *   Previous GTP item specification.
2392  * @param[in] attr
2393  *   Pointer to flow attributes.
2394  * @param[out] error
2395  *   Pointer to error structure.
2396  *
2397  * @return
2398  *   0 on success, a negative errno value otherwise and rte_errno is set.
2399  */
2400 static int
2401 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2402                               uint64_t last_item,
2403                               const struct rte_flow_item *gtp_item,
2404                               const struct rte_flow_attr *attr,
2405                               struct rte_flow_error *error)
2406 {
2407         const struct rte_flow_item_gtp *gtp_spec;
2408         const struct rte_flow_item_gtp *gtp_mask;
2409         const struct rte_flow_item_gtp_psc *spec;
2410         const struct rte_flow_item_gtp_psc *mask;
2411         const struct rte_flow_item_gtp_psc nic_mask = {
2412                 .pdu_type = 0xFF,
2413                 .qfi = 0xFF,
2414         };
2415
2416         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2417                 return rte_flow_error_set
2418                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2419                          "GTP PSC item must be preceded with GTP item");
2420         gtp_spec = gtp_item->spec;
2421         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2422         /* GTP spec and E flag is requested to match zero. */
2423         if (gtp_spec &&
2424                 (gtp_mask->v_pt_rsv_flags &
2425                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2426                 return rte_flow_error_set
2427                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2428                          "GTP E flag must be 1 to match GTP PSC");
2429         /* Check the flow is not created in group zero. */
2430         if (!attr->transfer && !attr->group)
2431                 return rte_flow_error_set
2432                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2433                          "GTP PSC is not supported for group 0");
2434         /* GTP spec is here and E flag is requested to match zero. */
2435         if (!item->spec)
2436                 return 0;
2437         spec = item->spec;
2438         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2439         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2440                 return rte_flow_error_set
2441                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2442                          "PDU type should be smaller than 16");
2443         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2444                                          (const uint8_t *)&nic_mask,
2445                                          sizeof(struct rte_flow_item_gtp_psc),
2446                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2447 }
2448
2449 /**
2450  * Validate IPV4 item.
2451  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2452  * add specific validation of fragment_offset field,
2453  *
2454  * @param[in] item
2455  *   Item specification.
2456  * @param[in] item_flags
2457  *   Bit-fields that holds the items detected until now.
2458  * @param[out] error
2459  *   Pointer to error structure.
2460  *
2461  * @return
2462  *   0 on success, a negative errno value otherwise and rte_errno is set.
2463  */
2464 static int
2465 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2466                            uint64_t item_flags,
2467                            uint64_t last_item,
2468                            uint16_t ether_type,
2469                            struct rte_flow_error *error)
2470 {
2471         int ret;
2472         const struct rte_flow_item_ipv4 *spec = item->spec;
2473         const struct rte_flow_item_ipv4 *last = item->last;
2474         const struct rte_flow_item_ipv4 *mask = item->mask;
2475         rte_be16_t fragment_offset_spec = 0;
2476         rte_be16_t fragment_offset_last = 0;
2477         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2478                 .hdr = {
2479                         .src_addr = RTE_BE32(0xffffffff),
2480                         .dst_addr = RTE_BE32(0xffffffff),
2481                         .type_of_service = 0xff,
2482                         .fragment_offset = RTE_BE16(0xffff),
2483                         .next_proto_id = 0xff,
2484                         .time_to_live = 0xff,
2485                 },
2486         };
2487
2488         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2489                                            ether_type, &nic_ipv4_mask,
2490                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2491         if (ret < 0)
2492                 return ret;
2493         if (spec && mask)
2494                 fragment_offset_spec = spec->hdr.fragment_offset &
2495                                        mask->hdr.fragment_offset;
2496         if (!fragment_offset_spec)
2497                 return 0;
2498         /*
2499          * spec and mask are valid, enforce using full mask to make sure the
2500          * complete value is used correctly.
2501          */
2502         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2503                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2504                 return rte_flow_error_set(error, EINVAL,
2505                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2506                                           item, "must use full mask for"
2507                                           " fragment_offset");
2508         /*
2509          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2510          * indicating this is 1st fragment of fragmented packet.
2511          * This is not yet supported in MLX5, return appropriate error message.
2512          */
2513         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2514                 return rte_flow_error_set(error, ENOTSUP,
2515                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2516                                           "match on first fragment not "
2517                                           "supported");
2518         if (fragment_offset_spec && !last)
2519                 return rte_flow_error_set(error, ENOTSUP,
2520                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2521                                           "specified value not supported");
2522         /* spec and last are valid, validate the specified range. */
2523         fragment_offset_last = last->hdr.fragment_offset &
2524                                mask->hdr.fragment_offset;
2525         /*
2526          * Match on fragment_offset spec 0x2001 and last 0x3fff
2527          * means MF is 1 and frag-offset is > 0.
2528          * This packet is fragment 2nd and onward, excluding last.
2529          * This is not yet supported in MLX5, return appropriate
2530          * error message.
2531          */
2532         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2533             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2534                 return rte_flow_error_set(error, ENOTSUP,
2535                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2536                                           last, "match on following "
2537                                           "fragments not supported");
2538         /*
2539          * Match on fragment_offset spec 0x0001 and last 0x1fff
2540          * means MF is 0 and frag-offset is > 0.
2541          * This packet is last fragment of fragmented packet.
2542          * This is not yet supported in MLX5, return appropriate
2543          * error message.
2544          */
2545         if (fragment_offset_spec == RTE_BE16(1) &&
2546             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2547                 return rte_flow_error_set(error, ENOTSUP,
2548                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2549                                           last, "match on last "
2550                                           "fragment not supported");
2551         /*
2552          * Match on fragment_offset spec 0x0001 and last 0x3fff
2553          * means MF and/or frag-offset is not 0.
2554          * This is a fragmented packet.
2555          * Other range values are invalid and rejected.
2556          */
2557         if (!(fragment_offset_spec == RTE_BE16(1) &&
2558               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2559                 return rte_flow_error_set(error, ENOTSUP,
2560                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2561                                           "specified range not supported");
2562         return 0;
2563 }
2564
2565 /**
2566  * Validate IPV6 fragment extension item.
2567  *
2568  * @param[in] item
2569  *   Item specification.
2570  * @param[in] item_flags
2571  *   Bit-fields that holds the items detected until now.
2572  * @param[out] error
2573  *   Pointer to error structure.
2574  *
2575  * @return
2576  *   0 on success, a negative errno value otherwise and rte_errno is set.
2577  */
2578 static int
2579 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2580                                     uint64_t item_flags,
2581                                     struct rte_flow_error *error)
2582 {
2583         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2584         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2585         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2586         rte_be16_t frag_data_spec = 0;
2587         rte_be16_t frag_data_last = 0;
2588         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2589         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2590                                       MLX5_FLOW_LAYER_OUTER_L4;
2591         int ret = 0;
2592         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2593                 .hdr = {
2594                         .next_header = 0xff,
2595                         .frag_data = RTE_BE16(0xffff),
2596                 },
2597         };
2598
2599         if (item_flags & l4m)
2600                 return rte_flow_error_set(error, EINVAL,
2601                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2602                                           "ipv6 fragment extension item cannot "
2603                                           "follow L4 item.");
2604         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2605             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2606                 return rte_flow_error_set(error, EINVAL,
2607                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2608                                           "ipv6 fragment extension item must "
2609                                           "follow ipv6 item");
2610         if (spec && mask)
2611                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2612         if (!frag_data_spec)
2613                 return 0;
2614         /*
2615          * spec and mask are valid, enforce using full mask to make sure the
2616          * complete value is used correctly.
2617          */
2618         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2619                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2620                 return rte_flow_error_set(error, EINVAL,
2621                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2622                                           item, "must use full mask for"
2623                                           " frag_data");
2624         /*
2625          * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
2626          * This is 1st fragment of fragmented packet.
2627          */
2628         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2629                 return rte_flow_error_set(error, ENOTSUP,
2630                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2631                                           "match on first fragment not "
2632                                           "supported");
2633         if (frag_data_spec && !last)
2634                 return rte_flow_error_set(error, EINVAL,
2635                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2636                                           "specified value not supported");
2637         ret = mlx5_flow_item_acceptable
2638                                 (item, (const uint8_t *)mask,
2639                                  (const uint8_t *)&nic_mask,
2640                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2641                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2642         if (ret)
2643                 return ret;
2644         /* spec and last are valid, validate the specified range. */
2645         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2646         /*
2647          * Match on frag_data spec 0x0009 and last 0xfff9
2648          * means M is 1 and frag-offset is > 0.
2649          * This packet is fragment 2nd and onward, excluding last.
2650          * This is not yet supported in MLX5, return appropriate
2651          * error message.
2652          */
2653         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2654                                        RTE_IPV6_EHDR_MF_MASK) &&
2655             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2656                 return rte_flow_error_set(error, ENOTSUP,
2657                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2658                                           last, "match on following "
2659                                           "fragments not supported");
2660         /*
2661          * Match on frag_data spec 0x0008 and last 0xfff8
2662          * means M is 0 and frag-offset is > 0.
2663          * This packet is last fragment of fragmented packet.
2664          * This is not yet supported in MLX5, return appropriate
2665          * error message.
2666          */
2667         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2668             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2669                 return rte_flow_error_set(error, ENOTSUP,
2670                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2671                                           last, "match on last "
2672                                           "fragment not supported");
2673         /* Other range values are invalid and rejected. */
2674         return rte_flow_error_set(error, EINVAL,
2675                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2676                                   "specified range not supported");
2677 }
2678
2679 /*
2680  * Validate ASO CT item.
2681  *
2682  * @param[in] dev
2683  *   Pointer to the rte_eth_dev structure.
2684  * @param[in] item
2685  *   Item specification.
2686  * @param[in] item_flags
2687  *   Pointer to bit-fields that holds the items detected until now.
2688  * @param[out] error
2689  *   Pointer to error structure.
2690  *
2691  * @return
2692  *   0 on success, a negative errno value otherwise and rte_errno is set.
2693  */
2694 static int
2695 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2696                              const struct rte_flow_item *item,
2697                              uint64_t *item_flags,
2698                              struct rte_flow_error *error)
2699 {
2700         const struct rte_flow_item_conntrack *spec = item->spec;
2701         const struct rte_flow_item_conntrack *mask = item->mask;
2702         RTE_SET_USED(dev);
2703         uint32_t flags;
2704
2705         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2706                 return rte_flow_error_set(error, EINVAL,
2707                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2708                                           "Only one CT is supported");
2709         if (!mask)
2710                 mask = &rte_flow_item_conntrack_mask;
2711         flags = spec->flags & mask->flags;
2712         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2713             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2714              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2715              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2716                 return rte_flow_error_set(error, EINVAL,
2717                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2718                                           "Conflict status bits");
2719         /* State change also needs to be considered. */
2720         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2721         return 0;
2722 }
2723
2724 /**
2725  * Validate the pop VLAN action.
2726  *
2727  * @param[in] dev
2728  *   Pointer to the rte_eth_dev structure.
2729  * @param[in] action_flags
2730  *   Holds the actions detected until now.
2731  * @param[in] action
2732  *   Pointer to the pop vlan action.
2733  * @param[in] item_flags
2734  *   The items found in this flow rule.
2735  * @param[in] attr
2736  *   Pointer to flow attributes.
2737  * @param[out] error
2738  *   Pointer to error structure.
2739  *
2740  * @return
2741  *   0 on success, a negative errno value otherwise and rte_errno is set.
2742  */
2743 static int
2744 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2745                                  uint64_t action_flags,
2746                                  const struct rte_flow_action *action,
2747                                  uint64_t item_flags,
2748                                  const struct rte_flow_attr *attr,
2749                                  struct rte_flow_error *error)
2750 {
2751         const struct mlx5_priv *priv = dev->data->dev_private;
2752
2753         (void)action;
2754         (void)attr;
2755         if (!priv->sh->pop_vlan_action)
2756                 return rte_flow_error_set(error, ENOTSUP,
2757                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2758                                           NULL,
2759                                           "pop vlan action is not supported");
2760         if (attr->egress)
2761                 return rte_flow_error_set(error, ENOTSUP,
2762                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2763                                           NULL,
2764                                           "pop vlan action not supported for "
2765                                           "egress");
2766         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2767                 return rte_flow_error_set(error, ENOTSUP,
2768                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2769                                           "no support for multiple VLAN "
2770                                           "actions");
2771         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2772         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2773             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2774                 return rte_flow_error_set(error, ENOTSUP,
2775                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2776                                           NULL,
2777                                           "cannot pop vlan after decap without "
2778                                           "match on inner vlan in the flow");
2779         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2780         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2781             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2782                 return rte_flow_error_set(error, ENOTSUP,
2783                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2784                                           NULL,
2785                                           "cannot pop vlan without a "
2786                                           "match on (outer) vlan in the flow");
2787         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2788                 return rte_flow_error_set(error, EINVAL,
2789                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2790                                           "wrong action order, port_id should "
2791                                           "be after pop VLAN action");
2792         if (!attr->transfer && priv->representor)
2793                 return rte_flow_error_set(error, ENOTSUP,
2794                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2795                                           "pop vlan action for VF representor "
2796                                           "not supported on NIC table");
2797         return 0;
2798 }
2799
2800 /**
2801  * Get VLAN default info from vlan match info.
2802  *
2803  * @param[in] items
2804  *   the list of item specifications.
2805  * @param[out] vlan
2806  *   pointer VLAN info to fill to.
2807  *
 * @return
 *   None. The extracted VLAN info is written into @p vlan in place.
2810  */
2811 static void
2812 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2813                                   struct rte_vlan_hdr *vlan)
2814 {
2815         const struct rte_flow_item_vlan nic_mask = {
2816                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2817                                 MLX5DV_FLOW_VLAN_VID_MASK),
2818                 .inner_type = RTE_BE16(0xffff),
2819         };
2820
2821         if (items == NULL)
2822                 return;
2823         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2824                 int type = items->type;
2825
2826                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2827                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2828                         break;
2829         }
2830         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2831                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2832                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2833
2834                 /* If VLAN item in pattern doesn't contain data, return here. */
2835                 if (!vlan_v)
2836                         return;
2837                 if (!vlan_m)
2838                         vlan_m = &nic_mask;
2839                 /* Only full match values are accepted */
2840                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2841                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2842                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2843                         vlan->vlan_tci |=
2844                                 rte_be_to_cpu_16(vlan_v->tci &
2845                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2846                 }
2847                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2848                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2849                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2850                         vlan->vlan_tci |=
2851                                 rte_be_to_cpu_16(vlan_v->tci &
2852                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2853                 }
2854                 if (vlan_m->inner_type == nic_mask.inner_type)
2855                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2856                                                            vlan_m->inner_type);
2857         }
2858 }
2859
2860 /**
2861  * Validate the push VLAN action.
2862  *
2863  * @param[in] dev
2864  *   Pointer to the rte_eth_dev structure.
2865  * @param[in] action_flags
2866  *   Holds the actions detected until now.
 * @param[in] vlan_m
 *   Pointer to the VLAN item mask from the pattern, or NULL if absent.
2869  * @param[in] action
2870  *   Pointer to the action structure.
2871  * @param[in] attr
2872  *   Pointer to flow attributes
2873  * @param[out] error
2874  *   Pointer to error structure.
2875  *
2876  * @return
2877  *   0 on success, a negative errno value otherwise and rte_errno is set.
2878  */
2879 static int
2880 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2881                                   uint64_t action_flags,
2882                                   const struct rte_flow_item_vlan *vlan_m,
2883                                   const struct rte_flow_action *action,
2884                                   const struct rte_flow_attr *attr,
2885                                   struct rte_flow_error *error)
2886 {
2887         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2888         const struct mlx5_priv *priv = dev->data->dev_private;
2889
2890         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2891             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2892                 return rte_flow_error_set(error, EINVAL,
2893                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2894                                           "invalid vlan ethertype");
2895         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2896                 return rte_flow_error_set(error, EINVAL,
2897                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2898                                           "wrong action order, port_id should "
2899                                           "be after push VLAN");
2900         if (!attr->transfer && priv->representor)
2901                 return rte_flow_error_set(error, ENOTSUP,
2902                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2903                                           "push vlan action for VF representor "
2904                                           "not supported on NIC table");
2905         if (vlan_m &&
2906             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2907             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2908                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2909             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2910             !(mlx5_flow_find_action
2911                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2912                 return rte_flow_error_set(error, EINVAL,
2913                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2914                                           "not full match mask on VLAN PCP and "
2915                                           "there is no of_set_vlan_pcp action, "
2916                                           "push VLAN action cannot figure out "
2917                                           "PCP value");
2918         if (vlan_m &&
2919             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2920             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2921                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2922             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2923             !(mlx5_flow_find_action
2924                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2925                 return rte_flow_error_set(error, EINVAL,
2926                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2927                                           "not full match mask on VLAN VID and "
2928                                           "there is no of_set_vlan_vid action, "
2929                                           "push VLAN action cannot figure out "
2930                                           "VID value");
2931         (void)attr;
2932         return 0;
2933 }
2934
2935 /**
2936  * Validate the set VLAN PCP.
2937  *
2938  * @param[in] action_flags
2939  *   Holds the actions detected until now.
2940  * @param[in] actions
2941  *   Pointer to the list of actions remaining in the flow rule.
2942  * @param[out] error
2943  *   Pointer to error structure.
2944  *
2945  * @return
2946  *   0 on success, a negative errno value otherwise and rte_errno is set.
2947  */
2948 static int
2949 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2950                                      const struct rte_flow_action actions[],
2951                                      struct rte_flow_error *error)
2952 {
2953         const struct rte_flow_action *action = actions;
2954         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2955
2956         if (conf->vlan_pcp > 7)
2957                 return rte_flow_error_set(error, EINVAL,
2958                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2959                                           "VLAN PCP value is too big");
2960         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2961                 return rte_flow_error_set(error, ENOTSUP,
2962                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2963                                           "set VLAN PCP action must follow "
2964                                           "the push VLAN action");
2965         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2966                 return rte_flow_error_set(error, ENOTSUP,
2967                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2968                                           "Multiple VLAN PCP modification are "
2969                                           "not supported");
2970         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2971                 return rte_flow_error_set(error, EINVAL,
2972                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2973                                           "wrong action order, port_id should "
2974                                           "be after set VLAN PCP");
2975         return 0;
2976 }
2977
2978 /**
2979  * Validate the set VLAN VID.
2980  *
2981  * @param[in] item_flags
2982  *   Holds the items detected in this rule.
2983  * @param[in] action_flags
2984  *   Holds the actions detected until now.
2985  * @param[in] actions
2986  *   Pointer to the list of actions remaining in the flow rule.
2987  * @param[out] error
2988  *   Pointer to error structure.
2989  *
2990  * @return
2991  *   0 on success, a negative errno value otherwise and rte_errno is set.
2992  */
2993 static int
2994 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2995                                      uint64_t action_flags,
2996                                      const struct rte_flow_action actions[],
2997                                      struct rte_flow_error *error)
2998 {
2999         const struct rte_flow_action *action = actions;
3000         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
3001
3002         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
3003                 return rte_flow_error_set(error, EINVAL,
3004                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3005                                           "VLAN VID value is too big");
3006         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3007             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3008                 return rte_flow_error_set(error, ENOTSUP,
3009                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3010                                           "set VLAN VID action must follow push"
3011                                           " VLAN action or match on VLAN item");
3012         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3013                 return rte_flow_error_set(error, ENOTSUP,
3014                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3015                                           "Multiple VLAN VID modifications are "
3016                                           "not supported");
3017         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3018                 return rte_flow_error_set(error, EINVAL,
3019                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3020                                           "wrong action order, port_id should "
3021                                           "be after set VLAN VID");
3022         return 0;
3023 }
3024
/**
3026  * Validate the FLAG action.
3027  *
3028  * @param[in] dev
3029  *   Pointer to the rte_eth_dev structure.
3030  * @param[in] action_flags
3031  *   Holds the actions detected until now.
3032  * @param[in] attr
3033  *   Pointer to flow attributes
3034  * @param[out] error
3035  *   Pointer to error structure.
3036  *
3037  * @return
3038  *   0 on success, a negative errno value otherwise and rte_errno is set.
3039  */
3040 static int
3041 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3042                              uint64_t action_flags,
3043                              const struct rte_flow_attr *attr,
3044                              struct rte_flow_error *error)
3045 {
3046         struct mlx5_priv *priv = dev->data->dev_private;
3047         struct mlx5_dev_config *config = &priv->config;
3048         int ret;
3049
3050         /* Fall back if no extended metadata register support. */
3051         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3052                 return mlx5_flow_validate_action_flag(action_flags, attr,
3053                                                       error);
3054         /* Extensive metadata mode requires registers. */
3055         if (!mlx5_flow_ext_mreg_supported(dev))
3056                 return rte_flow_error_set(error, ENOTSUP,
3057                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3058                                           "no metadata registers "
3059                                           "to support flag action");
3060         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3061                 return rte_flow_error_set(error, ENOTSUP,
3062                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3063                                           "extended metadata register"
3064                                           " isn't available");
3065         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3066         if (ret < 0)
3067                 return ret;
3068         MLX5_ASSERT(ret > 0);
3069         if (action_flags & MLX5_FLOW_ACTION_MARK)
3070                 return rte_flow_error_set(error, EINVAL,
3071                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3072                                           "can't mark and flag in same flow");
3073         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3074                 return rte_flow_error_set(error, EINVAL,
3075                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3076                                           "can't have 2 flag"
3077                                           " actions in same flow");
3078         return 0;
3079 }
3080
3081 /**
3082  * Validate MARK action.
3083  *
3084  * @param[in] dev
3085  *   Pointer to the rte_eth_dev structure.
3086  * @param[in] action
3087  *   Pointer to action.
3088  * @param[in] action_flags
3089  *   Holds the actions detected until now.
3090  * @param[in] attr
3091  *   Pointer to flow attributes
3092  * @param[out] error
3093  *   Pointer to error structure.
3094  *
3095  * @return
3096  *   0 on success, a negative errno value otherwise and rte_errno is set.
3097  */
3098 static int
3099 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3100                              const struct rte_flow_action *action,
3101                              uint64_t action_flags,
3102                              const struct rte_flow_attr *attr,
3103                              struct rte_flow_error *error)
3104 {
3105         struct mlx5_priv *priv = dev->data->dev_private;
3106         struct mlx5_dev_config *config = &priv->config;
3107         const struct rte_flow_action_mark *mark = action->conf;
3108         int ret;
3109
3110         if (is_tunnel_offload_active(dev))
3111                 return rte_flow_error_set(error, ENOTSUP,
3112                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3113                                           "no mark action "
3114                                           "if tunnel offload active");
3115         /* Fall back if no extended metadata register support. */
3116         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3117                 return mlx5_flow_validate_action_mark(action, action_flags,
3118                                                       attr, error);
3119         /* Extensive metadata mode requires registers. */
3120         if (!mlx5_flow_ext_mreg_supported(dev))
3121                 return rte_flow_error_set(error, ENOTSUP,
3122                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3123                                           "no metadata registers "
3124                                           "to support mark action");
3125         if (!priv->sh->dv_mark_mask)
3126                 return rte_flow_error_set(error, ENOTSUP,
3127                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3128                                           "extended metadata register"
3129                                           " isn't available");
3130         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3131         if (ret < 0)
3132                 return ret;
3133         MLX5_ASSERT(ret > 0);
3134         if (!mark)
3135                 return rte_flow_error_set(error, EINVAL,
3136                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3137                                           "configuration cannot be null");
3138         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3139                 return rte_flow_error_set(error, EINVAL,
3140                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3141                                           &mark->id,
3142                                           "mark id exceeds the limit");
3143         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3144                 return rte_flow_error_set(error, EINVAL,
3145                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3146                                           "can't flag and mark in same flow");
3147         if (action_flags & MLX5_FLOW_ACTION_MARK)
3148                 return rte_flow_error_set(error, EINVAL,
3149                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3150                                           "can't have 2 mark actions in same"
3151                                           " flow");
3152         return 0;
3153 }
3154
3155 /**
3156  * Validate SET_META action.
3157  *
3158  * @param[in] dev
3159  *   Pointer to the rte_eth_dev structure.
3160  * @param[in] action
3161  *   Pointer to the action structure.
3162  * @param[in] action_flags
3163  *   Holds the actions detected until now.
3164  * @param[in] attr
3165  *   Pointer to flow attributes
3166  * @param[out] error
3167  *   Pointer to error structure.
3168  *
3169  * @return
3170  *   0 on success, a negative errno value otherwise and rte_errno is set.
3171  */
3172 static int
3173 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3174                                  const struct rte_flow_action *action,
3175                                  uint64_t action_flags __rte_unused,
3176                                  const struct rte_flow_attr *attr,
3177                                  struct rte_flow_error *error)
3178 {
3179         const struct rte_flow_action_set_meta *conf;
3180         uint32_t nic_mask = UINT32_MAX;
3181         int reg;
3182
3183         if (!mlx5_flow_ext_mreg_supported(dev))
3184                 return rte_flow_error_set(error, ENOTSUP,
3185                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3186                                           "extended metadata register"
3187                                           " isn't supported");
3188         reg = flow_dv_get_metadata_reg(dev, attr, error);
3189         if (reg < 0)
3190                 return reg;
3191         if (reg == REG_NON)
3192                 return rte_flow_error_set(error, ENOTSUP,
3193                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3194                                           "unavalable extended metadata register");
3195         if (reg != REG_A && reg != REG_B) {
3196                 struct mlx5_priv *priv = dev->data->dev_private;
3197
3198                 nic_mask = priv->sh->dv_meta_mask;
3199         }
3200         if (!(action->conf))
3201                 return rte_flow_error_set(error, EINVAL,
3202                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3203                                           "configuration cannot be null");
3204         conf = (const struct rte_flow_action_set_meta *)action->conf;
3205         if (!conf->mask)
3206                 return rte_flow_error_set(error, EINVAL,
3207                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3208                                           "zero mask doesn't have any effect");
3209         if (conf->mask & ~nic_mask)
3210                 return rte_flow_error_set(error, EINVAL,
3211                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3212                                           "meta data must be within reg C0");
3213         return 0;
3214 }
3215
3216 /**
3217  * Validate SET_TAG action.
3218  *
3219  * @param[in] dev
3220  *   Pointer to the rte_eth_dev structure.
3221  * @param[in] action
3222  *   Pointer to the action structure.
3223  * @param[in] action_flags
3224  *   Holds the actions detected until now.
3225  * @param[in] attr
3226  *   Pointer to flow attributes
3227  * @param[out] error
3228  *   Pointer to error structure.
3229  *
3230  * @return
3231  *   0 on success, a negative errno value otherwise and rte_errno is set.
3232  */
3233 static int
3234 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3235                                 const struct rte_flow_action *action,
3236                                 uint64_t action_flags,
3237                                 const struct rte_flow_attr *attr,
3238                                 struct rte_flow_error *error)
3239 {
3240         const struct rte_flow_action_set_tag *conf;
3241         const uint64_t terminal_action_flags =
3242                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3243                 MLX5_FLOW_ACTION_RSS;
3244         int ret;
3245
3246         if (!mlx5_flow_ext_mreg_supported(dev))
3247                 return rte_flow_error_set(error, ENOTSUP,
3248                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3249                                           "extensive metadata register"
3250                                           " isn't supported");
3251         if (!(action->conf))
3252                 return rte_flow_error_set(error, EINVAL,
3253                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3254                                           "configuration cannot be null");
3255         conf = (const struct rte_flow_action_set_tag *)action->conf;
3256         if (!conf->mask)
3257                 return rte_flow_error_set(error, EINVAL,
3258                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3259                                           "zero mask doesn't have any effect");
3260         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3261         if (ret < 0)
3262                 return ret;
3263         if (!attr->transfer && attr->ingress &&
3264             (action_flags & terminal_action_flags))
3265                 return rte_flow_error_set(error, EINVAL,
3266                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3267                                           "set_tag has no effect"
3268                                           " with terminal actions");
3269         return 0;
3270 }
3271
3272 /**
3273  * Check if action counter is shared by either old or new mechanism.
3274  *
3275  * @param[in] action
3276  *   Pointer to the action structure.
3277  *
3278  * @return
3279  *   True when counter is shared, false otherwise.
3280  */
3281 static inline bool
3282 is_shared_action_count(const struct rte_flow_action *action)
3283 {
3284         const struct rte_flow_action_count *count =
3285                         (const struct rte_flow_action_count *)action->conf;
3286
3287         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3288                 return true;
3289         return !!(count && count->shared);
3290 }
3291
3292 /**
3293  * Validate count action.
3294  *
3295  * @param[in] dev
3296  *   Pointer to rte_eth_dev structure.
3297  * @param[in] shared
3298  *   Indicator if action is shared.
3299  * @param[in] action_flags
3300  *   Holds the actions detected until now.
3301  * @param[out] error
3302  *   Pointer to error structure.
3303  *
3304  * @return
3305  *   0 on success, a negative errno value otherwise and rte_errno is set.
3306  */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
			      uint64_t action_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* DevX support is a prerequisite for flow counters. */
	if (!priv->config.devx)
		goto notsup_err;
	if (action_flags & MLX5_FLOW_ACTION_COUNT)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "duplicate count actions set");
	/* Legacy (non-ASO) AGE cannot be combined with a shared counter. */
	if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
	    !priv->sh->flow_hit_aso_en)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "old age and shared count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
	/*
	 * Without DEVX counter support at build time, control falls
	 * through to the rejection below even after the checks pass.
	 */
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
3335
3336 /**
3337  * Validate the L2 encap action.
3338  *
3339  * @param[in] dev
3340  *   Pointer to the rte_eth_dev structure.
3341  * @param[in] action_flags
3342  *   Holds the actions detected until now.
3343  * @param[in] action
3344  *   Pointer to the action structure.
3345  * @param[in] attr
3346  *   Pointer to flow attributes.
3347  * @param[out] error
3348  *   Pointer to error structure.
3349  *
3350  * @return
3351  *   0 on success, a negative errno value otherwise and rte_errno is set.
3352  */
3353 static int
3354 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3355                                  uint64_t action_flags,
3356                                  const struct rte_flow_action *action,
3357                                  const struct rte_flow_attr *attr,
3358                                  struct rte_flow_error *error)
3359 {
3360         const struct mlx5_priv *priv = dev->data->dev_private;
3361
3362         if (!(action->conf))
3363                 return rte_flow_error_set(error, EINVAL,
3364                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3365                                           "configuration cannot be null");
3366         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3367                 return rte_flow_error_set(error, EINVAL,
3368                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3369                                           "can only have a single encap action "
3370                                           "in a flow");
3371         if (!attr->transfer && priv->representor)
3372                 return rte_flow_error_set(error, ENOTSUP,
3373                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3374                                           "encap action for VF representor "
3375                                           "not supported on NIC table");
3376         return 0;
3377 }
3378
3379 /**
3380  * Validate a decap action.
3381  *
3382  * @param[in] dev
3383  *   Pointer to the rte_eth_dev structure.
3384  * @param[in] action_flags
3385  *   Holds the actions detected until now.
3386  * @param[in] action
3387  *   Pointer to the action structure.
3388  * @param[in] item_flags
3389  *   Holds the items detected.
3390  * @param[in] attr
3391  *   Pointer to flow attributes
3392  * @param[out] error
3393  *   Pointer to error structure.
3394  *
3395  * @return
3396  *   0 on success, a negative errno value otherwise and rte_errno is set.
3397  */
static int
flow_dv_validate_action_decap(struct rte_eth_dev *dev,
                              uint64_t action_flags,
                              const struct rte_flow_action *action,
                              const uint64_t item_flags,
                              const struct rte_flow_attr *attr,
                              struct rte_flow_error *error)
{
        const struct mlx5_priv *priv = dev->data->dev_private;

        /*
         * Refuse decap when the HCA cannot scatter FCS together with
         * decap and decap support was not explicitly enabled in the
         * device configuration.
         */
        if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
            !priv->config.decap_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "decap is not enabled");
        /*
         * At most one decap per rule, and it must not appear after an
         * encap; pick the error message from which flag is already set.
         */
        if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          action_flags &
                                          MLX5_FLOW_ACTION_DECAP ? "can only "
                                          "have a single decap action" : "decap "
                                          "after encap is not supported");
        /* Header-modify actions must follow the decap, not precede it. */
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        /* Decap only applies to received (ingress/transfer) traffic. */
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          NULL,
                                          "decap action not supported for "
                                          "egress");
        /* NIC-table (non-transfer) rules on a representor cannot decap. */
        if (!attr->transfer && priv->representor)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "decap action for VF representor "
                                          "not supported on NIC table");
        /* VXLAN decap requires the rule to actually match a VXLAN header. */
        if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
            !(item_flags & MLX5_FLOW_LAYER_VXLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "VXLAN item should be present for VXLAN decap");
        return 0;
}
3443
/* Zero-length raw decap descriptor, shared as a ready-made "no data" action. */
const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3445
3446 /**
3447  * Validate the raw encap and decap actions.
3448  *
3449  * @param[in] dev
3450  *   Pointer to the rte_eth_dev structure.
3451  * @param[in] decap
3452  *   Pointer to the decap action.
3453  * @param[in] encap
3454  *   Pointer to the encap action.
3455  * @param[in] attr
3456  *   Pointer to flow attributes
3457  * @param[in/out] action_flags
3458  *   Holds the actions detected until now.
3459  * @param[out] actions_n
3460  *   pointer to the number of actions counter.
3461  * @param[in] action
3462  *   Pointer to the action structure.
3463  * @param[in] item_flags
3464  *   Holds the items detected.
3465  * @param[out] error
3466  *   Pointer to error structure.
3467  *
3468  * @return
3469  *   0 on success, a negative errno value otherwise and rte_errno is set.
3470  */
static int
flow_dv_validate_action_raw_encap_decap
        (struct rte_eth_dev *dev,
         const struct rte_flow_action_raw_decap *decap,
         const struct rte_flow_action_raw_encap *encap,
         const struct rte_flow_attr *attr, uint64_t *action_flags,
         int *actions_n, const struct rte_flow_action *action,
         uint64_t item_flags, struct rte_flow_error *error)
{
        const struct mlx5_priv *priv = dev->data->dev_private;
        int ret;

        if (encap && (!encap->size || !encap->data))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "raw encap data cannot be empty");
        if (decap && encap) {
                /*
                 * Classify the decap+encap pair by comparing each buffer
                 * size with MLX5_ENCAPSULATION_DECISION_SIZE:
                 * - small decap + big encap -> single L3 encap, the decap
                 *   half is folded in, so drop the decap pointer;
                 * - big decap + small encap -> single L3 decap, drop the
                 *   encap pointer;
                 * - both big -> two independent L2 actions, keep both;
                 * - both small -> unsupported combination.
                 */
                if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
                    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
                        /* L3 encap. */
                        decap = NULL;
                else if (encap->size <=
                           MLX5_ENCAPSULATION_DECISION_SIZE &&
                           decap->size >
                           MLX5_ENCAPSULATION_DECISION_SIZE)
                        /* L3 decap. */
                        encap = NULL;
                else if (encap->size >
                           MLX5_ENCAPSULATION_DECISION_SIZE &&
                           decap->size >
                           MLX5_ENCAPSULATION_DECISION_SIZE)
                        /* 2 L2 actions: encap and decap. */
                        ;
                else
                        return rte_flow_error_set(error,
                                ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "unsupported too small "
                                "raw decap and too small raw "
                                "encap combination");
        }
        if (decap) {
                /* Run the generic decap checks, then account the action. */
                ret = flow_dv_validate_action_decap(dev, *action_flags, action,
                                                    item_flags, attr, error);
                if (ret < 0)
                        return ret;
                *action_flags |= MLX5_FLOW_ACTION_DECAP;
                ++(*actions_n);
        }
        if (encap) {
                /* A surviving encap here must carry a full L2 header. */
                if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "small raw encap size");
                if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "more than one encap action");
                /* NIC-table rules on a representor cannot encap. */
                if (!attr->transfer && priv->representor)
                        return rte_flow_error_set
                                        (error, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "encap action for VF representor "
                                         "not supported on NIC table");
                *action_flags |= MLX5_FLOW_ACTION_ENCAP;
                ++(*actions_n);
        }
        return 0;
}
3542
3543 /*
3544  * Validate the ASO CT action.
3545  *
3546  * @param[in] dev
3547  *   Pointer to the rte_eth_dev structure.
3548  * @param[in] action_flags
3549  *   Holds the actions detected until now.
3550  * @param[in] item_flags
3551  *   The items found in this flow rule.
3552  * @param[in] attr
3553  *   Pointer to flow attributes.
3554  * @param[out] error
3555  *   Pointer to error structure.
3556  *
3557  * @return
3558  *   0 on success, a negative errno value otherwise and rte_errno is set.
3559  */
3560 static int
3561 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3562                                uint64_t action_flags,
3563                                uint64_t item_flags,
3564                                const struct rte_flow_attr *attr,
3565                                struct rte_flow_error *error)
3566 {
3567         RTE_SET_USED(dev);
3568
3569         if (attr->group == 0 && !attr->transfer)
3570                 return rte_flow_error_set(error, ENOTSUP,
3571                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3572                                           NULL,
3573                                           "Only support non-root table");
3574         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3575                 return rte_flow_error_set(error, ENOTSUP,
3576                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3577                                           "CT cannot follow a fate action");
3578         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3579             (action_flags & MLX5_FLOW_ACTION_AGE))
3580                 return rte_flow_error_set(error, EINVAL,
3581                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3582                                           "Only one ASO action is supported");
3583         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3584                 return rte_flow_error_set(error, EINVAL,
3585                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3586                                           "Encap cannot exist before CT");
3587         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3588                 return rte_flow_error_set(error, EINVAL,
3589                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3590                                           "Not a outer TCP packet");
3591         return 0;
3592 }
3593
3594 /**
3595  * Match encap_decap resource.
3596  *
3597  * @param list
3598  *   Pointer to the hash list.
3599  * @param entry
3600  *   Pointer to exist resource entry object.
3601  * @param key
3602  *   Key of the new entry.
3603  * @param ctx_cb
3604  *   Pointer to new encap_decap resource.
3605  *
3606  * @return
3607  *   0 on matching, none-zero otherwise.
3608  */
3609 int
3610 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3611                              struct mlx5_hlist_entry *entry,
3612                              uint64_t key __rte_unused, void *cb_ctx)
3613 {
3614         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3615         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3616         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3617
3618         cache_resource = container_of(entry,
3619                                       struct mlx5_flow_dv_encap_decap_resource,
3620                                       entry);
3621         if (resource->reformat_type == cache_resource->reformat_type &&
3622             resource->ft_type == cache_resource->ft_type &&
3623             resource->flags == cache_resource->flags &&
3624             resource->size == cache_resource->size &&
3625             !memcmp((const void *)resource->buf,
3626                     (const void *)cache_resource->buf,
3627                     resource->size))
3628                 return 0;
3629         return -1;
3630 }
3631
3632 /**
3633  * Allocate encap_decap resource.
3634  *
3635  * @param list
3636  *   Pointer to the hash list.
3637  * @param entry
3638  *   Pointer to exist resource entry object.
3639  * @param ctx_cb
3640  *   Pointer to new encap_decap resource.
3641  *
3642  * @return
3643  *   0 on matching, none-zero otherwise.
3644  */
struct mlx5_hlist_entry *
flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
                              uint64_t key __rte_unused,
                              void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5dv_dr_domain *domain;
        struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
        struct mlx5_flow_dv_encap_decap_resource *cache_resource;
        uint32_t idx;
        int ret;

        /* Select the DR domain matching the flow table type: FDB, Rx or Tx. */
        if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;
        /* Register new encap/decap resource. */
        cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
                                       &idx);
        if (!cache_resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate resource memory");
                return NULL;
        }
        /* Copy the template and remember the ipool index for later release. */
        *cache_resource = *resource;
        cache_resource->idx = idx;
        /* Create the packet-reformat action; free the pool entry on failure. */
        ret = mlx5_flow_os_create_flow_action_packet_reformat
                                        (sh->ctx, domain, cache_resource,
                                         &cache_resource->action);
        if (ret) {
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "cannot create action");
                return NULL;
        }

        return &cache_resource->entry;
}
3688
3689 /**
3690  * Find existing encap/decap resource or create and register a new one.
3691  *
3692  * @param[in, out] dev
3693  *   Pointer to rte_eth_dev structure.
3694  * @param[in, out] resource
3695  *   Pointer to encap/decap resource.
3696  * @parm[in, out] dev_flow
3697  *   Pointer to the dev_flow.
3698  * @param[out] error
3699  *   pointer to error structure.
3700  *
3701  * @return
3702  *   0 on success otherwise -errno and errno is set.
3703  */
static int
flow_dv_encap_decap_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_encap_decap_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_hlist_entry *entry;
        /* Compact lookup key: table type, reformat type and root-ness. */
        union {
                struct {
                        uint32_t ft_type:8;
                        uint32_t refmt_type:8;
                        /*
                         * Header reformat actions can be shared between
                         * non-root tables. One bit to indicate non-root
                         * table or not.
                         */
                        uint32_t is_root:1;
                        uint32_t reserve:15;
                };
                uint32_t v32;
        } encap_decap_key = {
                {
                        .ft_type = resource->ft_type,
                        .refmt_type = resource->reformat_type,
                        /*
                         * NOTE(review): set to 1 when group != 0, i.e. for
                         * NON-root tables despite the field's name — confirm.
                         */
                        .is_root = !!dev_flow->dv.group,
                        .reserve = 0,
                }
        };
        struct mlx5_flow_cb_ctx ctx = {
                .error = error,
                .data = resource,
        };
        uint64_t key64;

        /* flags is 1 only for rules on the root table (group 0). */
        resource->flags = dev_flow->dv.group ? 0 : 1;
        /* Seed the hash key with the packed scalar fields... */
        key64 =  __rte_raw_cksum(&encap_decap_key.v32,
                                 sizeof(encap_decap_key.v32), 0);
        /* ...and fold in the reformat buffer, except for plain L2 decap. */
        if (resource->reformat_type !=
            MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
            resource->size)
                key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
        /* Find an existing resource or create one via the hlist callbacks. */
        entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
        if (!entry)
                return -rte_errno;
        resource = container_of(entry, typeof(*resource), entry);
        dev_flow->dv.encap_decap = resource;
        dev_flow->handle->dvh.rix_encap_decap = resource->idx;
        return 0;
}
3756
3757 /**
3758  * Find existing table jump resource or create and register a new one.
3759  *
3760  * @param[in, out] dev
3761  *   Pointer to rte_eth_dev structure.
3762  * @param[in, out] tbl
3763  *   Pointer to flow table resource.
3764  * @parm[in, out] dev_flow
3765  *   Pointer to the dev_flow.
3766  * @param[out] error
3767  *   pointer to error structure.
3768  *
3769  * @return
3770  *   0 on success otherwise -errno and errno is set.
3771  */
3772 static int
3773 flow_dv_jump_tbl_resource_register
3774                         (struct rte_eth_dev *dev __rte_unused,
3775                          struct mlx5_flow_tbl_resource *tbl,
3776                          struct mlx5_flow *dev_flow,
3777                          struct rte_flow_error *error __rte_unused)
3778 {
3779         struct mlx5_flow_tbl_data_entry *tbl_data =
3780                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3781
3782         MLX5_ASSERT(tbl);
3783         MLX5_ASSERT(tbl_data->jump.action);
3784         dev_flow->handle->rix_jump = tbl_data->idx;
3785         dev_flow->dv.jump = &tbl_data->jump;
3786         return 0;
3787 }
3788
3789 int
3790 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3791                          struct mlx5_cache_entry *entry, void *cb_ctx)
3792 {
3793         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3794         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3795         struct mlx5_flow_dv_port_id_action_resource *res =
3796                         container_of(entry, typeof(*res), entry);
3797
3798         return ref->port_id != res->port_id;
3799 }
3800
struct mlx5_cache_entry *
flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
                          struct mlx5_cache_entry *entry __rte_unused,
                          void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_port_id_action_resource *cache;
        uint32_t idx;
        int ret;

        /* Register new port id action resource. */
        cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
        if (!cache) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate port_id action cache memory");
                return NULL;
        }
        *cache = *ref;
        /* dest-port actions only exist in the FDB (E-Switch) domain. */
        ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
                                                        ref->port_id,
                                                        &cache->action);
        if (ret) {
                /* Release the ipool entry so the failure leaks nothing. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create action");
                return NULL;
        }
        cache->idx = idx;
        return &cache->entry;
}
3835
3836 /**
3837  * Find existing table port ID resource or create and register a new one.
3838  *
3839  * @param[in, out] dev
3840  *   Pointer to rte_eth_dev structure.
3841  * @param[in, out] resource
3842  *   Pointer to port ID action resource.
3843  * @parm[in, out] dev_flow
3844  *   Pointer to the dev_flow.
3845  * @param[out] error
3846  *   pointer to error structure.
3847  *
3848  * @return
3849  *   0 on success otherwise -errno and errno is set.
3850  */
3851 static int
3852 flow_dv_port_id_action_resource_register
3853                         (struct rte_eth_dev *dev,
3854                          struct mlx5_flow_dv_port_id_action_resource *resource,
3855                          struct mlx5_flow *dev_flow,
3856                          struct rte_flow_error *error)
3857 {
3858         struct mlx5_priv *priv = dev->data->dev_private;
3859         struct mlx5_cache_entry *entry;
3860         struct mlx5_flow_dv_port_id_action_resource *cache;
3861         struct mlx5_flow_cb_ctx ctx = {
3862                 .error = error,
3863                 .data = resource,
3864         };
3865
3866         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3867         if (!entry)
3868                 return -rte_errno;
3869         cache = container_of(entry, typeof(*cache), entry);
3870         dev_flow->dv.port_id_action = cache;
3871         dev_flow->handle->rix_port_id_action = cache->idx;
3872         return 0;
3873 }
3874
3875 int
3876 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3877                          struct mlx5_cache_entry *entry, void *cb_ctx)
3878 {
3879         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3880         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3881         struct mlx5_flow_dv_push_vlan_action_resource *res =
3882                         container_of(entry, typeof(*res), entry);
3883
3884         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3885 }
3886
struct mlx5_cache_entry *
flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
                          struct mlx5_cache_entry *entry __rte_unused,
                          void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_push_vlan_action_resource *cache;
        struct mlx5dv_dr_domain *domain;
        uint32_t idx;
        int ret;

        /* Register new push_vlan action resource. */
        cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
        if (!cache) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate push_vlan action cache memory");
                return NULL;
        }
        *cache = *ref;
        /* Select the DR domain matching the flow table type: FDB, Rx or Tx. */
        if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;
        ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
                                                        &cache->action);
        if (ret) {
                /* Release the ipool entry so the failure leaks nothing. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create push vlan action");
                return NULL;
        }
        cache->idx = idx;
        return &cache->entry;
}
3927
3928 /**
3929  * Find existing push vlan resource or create and register a new one.
3930  *
3931  * @param [in, out] dev
3932  *   Pointer to rte_eth_dev structure.
3933  * @param[in, out] resource
3934  *   Pointer to port ID action resource.
3935  * @parm[in, out] dev_flow
3936  *   Pointer to the dev_flow.
3937  * @param[out] error
3938  *   pointer to error structure.
3939  *
3940  * @return
3941  *   0 on success otherwise -errno and errno is set.
3942  */
3943 static int
3944 flow_dv_push_vlan_action_resource_register
3945                        (struct rte_eth_dev *dev,
3946                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3947                         struct mlx5_flow *dev_flow,
3948                         struct rte_flow_error *error)
3949 {
3950         struct mlx5_priv *priv = dev->data->dev_private;
3951         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3952         struct mlx5_cache_entry *entry;
3953         struct mlx5_flow_cb_ctx ctx = {
3954                 .error = error,
3955                 .data = resource,
3956         };
3957
3958         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3959         if (!entry)
3960                 return -rte_errno;
3961         cache = container_of(entry, typeof(*cache), entry);
3962
3963         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3964         dev_flow->dv.push_vlan_res = cache;
3965         return 0;
3966 }
3967
3968 /**
3969  * Get the size of specific rte_flow_item_type hdr size
3970  *
3971  * @param[in] item_type
3972  *   Tested rte_flow_item_type.
3973  *
3974  * @return
3975  *   sizeof struct item_type, 0 if void or irrelevant.
3976  */
3977 static size_t
3978 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3979 {
3980         size_t retval;
3981
3982         switch (item_type) {
3983         case RTE_FLOW_ITEM_TYPE_ETH:
3984                 retval = sizeof(struct rte_ether_hdr);
3985                 break;
3986         case RTE_FLOW_ITEM_TYPE_VLAN:
3987                 retval = sizeof(struct rte_vlan_hdr);
3988                 break;
3989         case RTE_FLOW_ITEM_TYPE_IPV4:
3990                 retval = sizeof(struct rte_ipv4_hdr);
3991                 break;
3992         case RTE_FLOW_ITEM_TYPE_IPV6:
3993                 retval = sizeof(struct rte_ipv6_hdr);
3994                 break;
3995         case RTE_FLOW_ITEM_TYPE_UDP:
3996                 retval = sizeof(struct rte_udp_hdr);
3997                 break;
3998         case RTE_FLOW_ITEM_TYPE_TCP:
3999                 retval = sizeof(struct rte_tcp_hdr);
4000                 break;
4001         case RTE_FLOW_ITEM_TYPE_VXLAN:
4002         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4003                 retval = sizeof(struct rte_vxlan_hdr);
4004                 break;
4005         case RTE_FLOW_ITEM_TYPE_GRE:
4006         case RTE_FLOW_ITEM_TYPE_NVGRE:
4007                 retval = sizeof(struct rte_gre_hdr);
4008                 break;
4009         case RTE_FLOW_ITEM_TYPE_MPLS:
4010                 retval = sizeof(struct rte_mpls_hdr);
4011                 break;
4012         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4013         default:
4014                 retval = 0;
4015                 break;
4016         }
4017         return retval;
4018 }
4019
/* Default header field values filled in when building encap header data. */
#define MLX5_ENCAP_IPV4_VERSION         0x40
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4027
4028 /**
4029  * Convert the encap action data from list of rte_flow_item to raw buffer
4030  *
4031  * @param[in] items
4032  *   Pointer to rte_flow_item objects list.
4033  * @param[out] buf
4034  *   Pointer to the output buffer.
4035  * @param[out] size
4036  *   Pointer to the output buffer size.
4037  * @param[out] error
4038  *   Pointer to the error structure.
4039  *
4040  * @return
4041  *   0 on success, a negative errno value otherwise and rte_errno is set.
4042  */
4043 static int
4044 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4045                            size_t *size, struct rte_flow_error *error)
4046 {
4047         struct rte_ether_hdr *eth = NULL;
4048         struct rte_vlan_hdr *vlan = NULL;
4049         struct rte_ipv4_hdr *ipv4 = NULL;
4050         struct rte_ipv6_hdr *ipv6 = NULL;
4051         struct rte_udp_hdr *udp = NULL;
4052         struct rte_vxlan_hdr *vxlan = NULL;
4053         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4054         struct rte_gre_hdr *gre = NULL;
4055         size_t len;
4056         size_t temp_size = 0;
4057
4058         if (!items)
4059                 return rte_flow_error_set(error, EINVAL,
4060                                           RTE_FLOW_ERROR_TYPE_ACTION,
4061                                           NULL, "invalid empty data");
4062         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4063                 len = flow_dv_get_item_hdr_len(items->type);
4064                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4065                         return rte_flow_error_set(error, EINVAL,
4066                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4067                                                   (void *)items->type,
4068                                                   "items total size is too big"
4069                                                   " for encap action");
4070                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4071                 switch (items->type) {
4072                 case RTE_FLOW_ITEM_TYPE_ETH:
4073                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4074                         break;
4075                 case RTE_FLOW_ITEM_TYPE_VLAN:
4076                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4077                         if (!eth)
4078                                 return rte_flow_error_set(error, EINVAL,
4079                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4080                                                 (void *)items->type,
4081                                                 "eth header not found");
4082                         if (!eth->ether_type)
4083                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4084                         break;
4085                 case RTE_FLOW_ITEM_TYPE_IPV4:
4086                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4087                         if (!vlan && !eth)
4088                                 return rte_flow_error_set(error, EINVAL,
4089                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4090                                                 (void *)items->type,
4091                                                 "neither eth nor vlan"
4092                                                 " header found");
4093                         if (vlan && !vlan->eth_proto)
4094                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4095                         else if (eth && !eth->ether_type)
4096                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4097                         if (!ipv4->version_ihl)
4098                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4099                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4100                         if (!ipv4->time_to_live)
4101                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4102                         break;
4103                 case RTE_FLOW_ITEM_TYPE_IPV6:
4104                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4105                         if (!vlan && !eth)
4106                                 return rte_flow_error_set(error, EINVAL,
4107                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4108                                                 (void *)items->type,
4109                                                 "neither eth nor vlan"
4110                                                 " header found");
4111                         if (vlan && !vlan->eth_proto)
4112                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4113                         else if (eth && !eth->ether_type)
4114                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4115                         if (!ipv6->vtc_flow)
4116                                 ipv6->vtc_flow =
4117                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4118                         if (!ipv6->hop_limits)
4119                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4120                         break;
4121                 case RTE_FLOW_ITEM_TYPE_UDP:
4122                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4123                         if (!ipv4 && !ipv6)
4124                                 return rte_flow_error_set(error, EINVAL,
4125                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4126                                                 (void *)items->type,
4127                                                 "ip header not found");
4128                         if (ipv4 && !ipv4->next_proto_id)
4129                                 ipv4->next_proto_id = IPPROTO_UDP;
4130                         else if (ipv6 && !ipv6->proto)
4131                                 ipv6->proto = IPPROTO_UDP;
4132                         break;
4133                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4134                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4135                         if (!udp)
4136                                 return rte_flow_error_set(error, EINVAL,
4137                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4138                                                 (void *)items->type,
4139                                                 "udp header not found");
4140                         if (!udp->dst_port)
4141                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4142                         if (!vxlan->vx_flags)
4143                                 vxlan->vx_flags =
4144                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4145                         break;
4146                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4147                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4148                         if (!udp)
4149                                 return rte_flow_error_set(error, EINVAL,
4150                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4151                                                 (void *)items->type,
4152                                                 "udp header not found");
4153                         if (!vxlan_gpe->proto)
4154                                 return rte_flow_error_set(error, EINVAL,
4155                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4156                                                 (void *)items->type,
4157                                                 "next protocol not found");
4158                         if (!udp->dst_port)
4159                                 udp->dst_port =
4160                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4161                         if (!vxlan_gpe->vx_flags)
4162                                 vxlan_gpe->vx_flags =
4163                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4164                         break;
4165                 case RTE_FLOW_ITEM_TYPE_GRE:
4166                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4167                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4168                         if (!gre->proto)
4169                                 return rte_flow_error_set(error, EINVAL,
4170                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4171                                                 (void *)items->type,
4172                                                 "next protocol not found");
4173                         if (!ipv4 && !ipv6)
4174                                 return rte_flow_error_set(error, EINVAL,
4175                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4176                                                 (void *)items->type,
4177                                                 "ip header not found");
4178                         if (ipv4 && !ipv4->next_proto_id)
4179                                 ipv4->next_proto_id = IPPROTO_GRE;
4180                         else if (ipv6 && !ipv6->proto)
4181                                 ipv6->proto = IPPROTO_GRE;
4182                         break;
4183                 case RTE_FLOW_ITEM_TYPE_VOID:
4184                         break;
4185                 default:
4186                         return rte_flow_error_set(error, EINVAL,
4187                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4188                                                   (void *)items->type,
4189                                                   "unsupported item type");
4190                         break;
4191                 }
4192                 temp_size += len;
4193         }
4194         *size = temp_size;
4195         return 0;
4196 }
4197
4198 static int
4199 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4200 {
4201         struct rte_ether_hdr *eth = NULL;
4202         struct rte_vlan_hdr *vlan = NULL;
4203         struct rte_ipv6_hdr *ipv6 = NULL;
4204         struct rte_udp_hdr *udp = NULL;
4205         char *next_hdr;
4206         uint16_t proto;
4207
4208         eth = (struct rte_ether_hdr *)data;
4209         next_hdr = (char *)(eth + 1);
4210         proto = RTE_BE16(eth->ether_type);
4211
4212         /* VLAN skipping */
4213         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4214                 vlan = (struct rte_vlan_hdr *)next_hdr;
4215                 proto = RTE_BE16(vlan->eth_proto);
4216                 next_hdr += sizeof(struct rte_vlan_hdr);
4217         }
4218
4219         /* HW calculates IPv4 csum. no need to proceed */
4220         if (proto == RTE_ETHER_TYPE_IPV4)
4221                 return 0;
4222
4223         /* non IPv4/IPv6 header. not supported */
4224         if (proto != RTE_ETHER_TYPE_IPV6) {
4225                 return rte_flow_error_set(error, ENOTSUP,
4226                                           RTE_FLOW_ERROR_TYPE_ACTION,
4227                                           NULL, "Cannot offload non IPv4/IPv6");
4228         }
4229
4230         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4231
4232         /* ignore non UDP */
4233         if (ipv6->proto != IPPROTO_UDP)
4234                 return 0;
4235
4236         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4237         udp->dgram_cksum = 0;
4238
4239         return 0;
4240 }
4241
4242 /**
4243  * Convert L2 encap action to DV specification.
4244  *
4245  * @param[in] dev
4246  *   Pointer to rte_eth_dev structure.
4247  * @param[in] action
4248  *   Pointer to action structure.
4249  * @param[in, out] dev_flow
4250  *   Pointer to the mlx5_flow.
4251  * @param[in] transfer
4252  *   Mark if the flow is E-Switch flow.
4253  * @param[out] error
4254  *   Pointer to the error structure.
4255  *
4256  * @return
4257  *   0 on success, a negative errno value otherwise and rte_errno is set.
4258  */
4259 static int
4260 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4261                                const struct rte_flow_action *action,
4262                                struct mlx5_flow *dev_flow,
4263                                uint8_t transfer,
4264                                struct rte_flow_error *error)
4265 {
4266         const struct rte_flow_item *encap_data;
4267         const struct rte_flow_action_raw_encap *raw_encap_data;
4268         struct mlx5_flow_dv_encap_decap_resource res = {
4269                 .reformat_type =
4270                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4271                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4272                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4273         };
4274
4275         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4276                 raw_encap_data =
4277                         (const struct rte_flow_action_raw_encap *)action->conf;
4278                 res.size = raw_encap_data->size;
4279                 memcpy(res.buf, raw_encap_data->data, res.size);
4280         } else {
4281                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4282                         encap_data =
4283                                 ((const struct rte_flow_action_vxlan_encap *)
4284                                                 action->conf)->definition;
4285                 else
4286                         encap_data =
4287                                 ((const struct rte_flow_action_nvgre_encap *)
4288                                                 action->conf)->definition;
4289                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4290                                                &res.size, error))
4291                         return -rte_errno;
4292         }
4293         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4294                 return -rte_errno;
4295         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4296                 return rte_flow_error_set(error, EINVAL,
4297                                           RTE_FLOW_ERROR_TYPE_ACTION,
4298                                           NULL, "can't create L2 encap action");
4299         return 0;
4300 }
4301
4302 /**
4303  * Convert L2 decap action to DV specification.
4304  *
4305  * @param[in] dev
4306  *   Pointer to rte_eth_dev structure.
4307  * @param[in, out] dev_flow
4308  *   Pointer to the mlx5_flow.
4309  * @param[in] transfer
4310  *   Mark if the flow is E-Switch flow.
4311  * @param[out] error
4312  *   Pointer to the error structure.
4313  *
4314  * @return
4315  *   0 on success, a negative errno value otherwise and rte_errno is set.
4316  */
4317 static int
4318 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4319                                struct mlx5_flow *dev_flow,
4320                                uint8_t transfer,
4321                                struct rte_flow_error *error)
4322 {
4323         struct mlx5_flow_dv_encap_decap_resource res = {
4324                 .size = 0,
4325                 .reformat_type =
4326                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4327                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4328                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4329         };
4330
4331         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4332                 return rte_flow_error_set(error, EINVAL,
4333                                           RTE_FLOW_ERROR_TYPE_ACTION,
4334                                           NULL, "can't create L2 decap action");
4335         return 0;
4336 }
4337
4338 /**
4339  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4340  *
4341  * @param[in] dev
4342  *   Pointer to rte_eth_dev structure.
4343  * @param[in] action
4344  *   Pointer to action structure.
4345  * @param[in, out] dev_flow
4346  *   Pointer to the mlx5_flow.
4347  * @param[in] attr
4348  *   Pointer to the flow attributes.
4349  * @param[out] error
4350  *   Pointer to the error structure.
4351  *
4352  * @return
4353  *   0 on success, a negative errno value otherwise and rte_errno is set.
4354  */
4355 static int
4356 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4357                                 const struct rte_flow_action *action,
4358                                 struct mlx5_flow *dev_flow,
4359                                 const struct rte_flow_attr *attr,
4360                                 struct rte_flow_error *error)
4361 {
4362         const struct rte_flow_action_raw_encap *encap_data;
4363         struct mlx5_flow_dv_encap_decap_resource res;
4364
4365         memset(&res, 0, sizeof(res));
4366         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4367         res.size = encap_data->size;
4368         memcpy(res.buf, encap_data->data, res.size);
4369         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4370                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4371                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4372         if (attr->transfer)
4373                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4374         else
4375                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4376                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4377         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4378                 return rte_flow_error_set(error, EINVAL,
4379                                           RTE_FLOW_ERROR_TYPE_ACTION,
4380                                           NULL, "can't create encap action");
4381         return 0;
4382 }
4383
4384 /**
4385  * Create action push VLAN.
4386  *
4387  * @param[in] dev
4388  *   Pointer to rte_eth_dev structure.
4389  * @param[in] attr
4390  *   Pointer to the flow attributes.
4391  * @param[in] vlan
4392  *   Pointer to the vlan to push to the Ethernet header.
4393  * @param[in, out] dev_flow
4394  *   Pointer to the mlx5_flow.
4395  * @param[out] error
4396  *   Pointer to the error structure.
4397  *
4398  * @return
4399  *   0 on success, a negative errno value otherwise and rte_errno is set.
4400  */
4401 static int
4402 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4403                                 const struct rte_flow_attr *attr,
4404                                 const struct rte_vlan_hdr *vlan,
4405                                 struct mlx5_flow *dev_flow,
4406                                 struct rte_flow_error *error)
4407 {
4408         struct mlx5_flow_dv_push_vlan_action_resource res;
4409
4410         memset(&res, 0, sizeof(res));
4411         res.vlan_tag =
4412                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4413                                  vlan->vlan_tci);
4414         if (attr->transfer)
4415                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4416         else
4417                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4418                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4419         return flow_dv_push_vlan_action_resource_register
4420                                             (dev, &res, dev_flow, error);
4421 }
4422
4423 /**
4424  * Validate the modify-header actions.
4425  *
4426  * @param[in] action_flags
4427  *   Holds the actions detected until now.
4428  * @param[in] action
4429  *   Pointer to the modify action.
4430  * @param[out] error
4431  *   Pointer to error structure.
4432  *
4433  * @return
4434  *   0 on success, a negative errno value otherwise and rte_errno is set.
4435  */
4436 static int
4437 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4438                                    const struct rte_flow_action *action,
4439                                    struct rte_flow_error *error)
4440 {
4441         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4442                 return rte_flow_error_set(error, EINVAL,
4443                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4444                                           NULL, "action configuration not set");
4445         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4446                 return rte_flow_error_set(error, EINVAL,
4447                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4448                                           "can't have encap action before"
4449                                           " modify action");
4450         return 0;
4451 }
4452
4453 /**
4454  * Validate the modify-header MAC address actions.
4455  *
4456  * @param[in] action_flags
4457  *   Holds the actions detected until now.
4458  * @param[in] action
4459  *   Pointer to the modify action.
4460  * @param[in] item_flags
4461  *   Holds the items detected.
4462  * @param[out] error
4463  *   Pointer to error structure.
4464  *
4465  * @return
4466  *   0 on success, a negative errno value otherwise and rte_errno is set.
4467  */
4468 static int
4469 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4470                                    const struct rte_flow_action *action,
4471                                    const uint64_t item_flags,
4472                                    struct rte_flow_error *error)
4473 {
4474         int ret = 0;
4475
4476         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4477         if (!ret) {
4478                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4479                         return rte_flow_error_set(error, EINVAL,
4480                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4481                                                   NULL,
4482                                                   "no L2 item in pattern");
4483         }
4484         return ret;
4485 }
4486
4487 /**
4488  * Validate the modify-header IPv4 address actions.
4489  *
4490  * @param[in] action_flags
4491  *   Holds the actions detected until now.
4492  * @param[in] action
4493  *   Pointer to the modify action.
4494  * @param[in] item_flags
4495  *   Holds the items detected.
4496  * @param[out] error
4497  *   Pointer to error structure.
4498  *
4499  * @return
4500  *   0 on success, a negative errno value otherwise and rte_errno is set.
4501  */
4502 static int
4503 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4504                                     const struct rte_flow_action *action,
4505                                     const uint64_t item_flags,
4506                                     struct rte_flow_error *error)
4507 {
4508         int ret = 0;
4509         uint64_t layer;
4510
4511         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4512         if (!ret) {
4513                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4514                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4515                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4516                 if (!(item_flags & layer))
4517                         return rte_flow_error_set(error, EINVAL,
4518                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4519                                                   NULL,
4520                                                   "no ipv4 item in pattern");
4521         }
4522         return ret;
4523 }
4524
4525 /**
4526  * Validate the modify-header IPv6 address actions.
4527  *
4528  * @param[in] action_flags
4529  *   Holds the actions detected until now.
4530  * @param[in] action
4531  *   Pointer to the modify action.
4532  * @param[in] item_flags
4533  *   Holds the items detected.
4534  * @param[out] error
4535  *   Pointer to error structure.
4536  *
4537  * @return
4538  *   0 on success, a negative errno value otherwise and rte_errno is set.
4539  */
4540 static int
4541 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4542                                     const struct rte_flow_action *action,
4543                                     const uint64_t item_flags,
4544                                     struct rte_flow_error *error)
4545 {
4546         int ret = 0;
4547         uint64_t layer;
4548
4549         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4550         if (!ret) {
4551                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4552                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4553                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4554                 if (!(item_flags & layer))
4555                         return rte_flow_error_set(error, EINVAL,
4556                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4557                                                   NULL,
4558                                                   "no ipv6 item in pattern");
4559         }
4560         return ret;
4561 }
4562
4563 /**
4564  * Validate the modify-header TP actions.
4565  *
4566  * @param[in] action_flags
4567  *   Holds the actions detected until now.
4568  * @param[in] action
4569  *   Pointer to the modify action.
4570  * @param[in] item_flags
4571  *   Holds the items detected.
4572  * @param[out] error
4573  *   Pointer to error structure.
4574  *
4575  * @return
4576  *   0 on success, a negative errno value otherwise and rte_errno is set.
4577  */
4578 static int
4579 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4580                                   const struct rte_flow_action *action,
4581                                   const uint64_t item_flags,
4582                                   struct rte_flow_error *error)
4583 {
4584         int ret = 0;
4585         uint64_t layer;
4586
4587         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4588         if (!ret) {
4589                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4590                                  MLX5_FLOW_LAYER_INNER_L4 :
4591                                  MLX5_FLOW_LAYER_OUTER_L4;
4592                 if (!(item_flags & layer))
4593                         return rte_flow_error_set(error, EINVAL,
4594                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4595                                                   NULL, "no transport layer "
4596                                                   "in pattern");
4597         }
4598         return ret;
4599 }
4600
4601 /**
4602  * Validate the modify-header actions of increment/decrement
4603  * TCP Sequence-number.
4604  *
4605  * @param[in] action_flags
4606  *   Holds the actions detected until now.
4607  * @param[in] action
4608  *   Pointer to the modify action.
4609  * @param[in] item_flags
4610  *   Holds the items detected.
4611  * @param[out] error
4612  *   Pointer to error structure.
4613  *
4614  * @return
4615  *   0 on success, a negative errno value otherwise and rte_errno is set.
4616  */
4617 static int
4618 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4619                                        const struct rte_flow_action *action,
4620                                        const uint64_t item_flags,
4621                                        struct rte_flow_error *error)
4622 {
4623         int ret = 0;
4624         uint64_t layer;
4625
4626         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4627         if (!ret) {
4628                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4629                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4630                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4631                 if (!(item_flags & layer))
4632                         return rte_flow_error_set(error, EINVAL,
4633                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4634                                                   NULL, "no TCP item in"
4635                                                   " pattern");
4636                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4637                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4638                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4639                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4640                         return rte_flow_error_set(error, EINVAL,
4641                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4642                                                   NULL,
4643                                                   "cannot decrease and increase"
4644                                                   " TCP sequence number"
4645                                                   " at the same time");
4646         }
4647         return ret;
4648 }
4649
4650 /**
4651  * Validate the modify-header actions of increment/decrement
4652  * TCP Acknowledgment number.
4653  *
4654  * @param[in] action_flags
4655  *   Holds the actions detected until now.
4656  * @param[in] action
4657  *   Pointer to the modify action.
4658  * @param[in] item_flags
4659  *   Holds the items detected.
4660  * @param[out] error
4661  *   Pointer to error structure.
4662  *
4663  * @return
4664  *   0 on success, a negative errno value otherwise and rte_errno is set.
4665  */
4666 static int
4667 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4668                                        const struct rte_flow_action *action,
4669                                        const uint64_t item_flags,
4670                                        struct rte_flow_error *error)
4671 {
4672         int ret = 0;
4673         uint64_t layer;
4674
4675         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4676         if (!ret) {
4677                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4678                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4679                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4680                 if (!(item_flags & layer))
4681                         return rte_flow_error_set(error, EINVAL,
4682                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4683                                                   NULL, "no TCP item in"
4684                                                   " pattern");
4685                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4686                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4687                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4688                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4689                         return rte_flow_error_set(error, EINVAL,
4690                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4691                                                   NULL,
4692                                                   "cannot decrease and increase"
4693                                                   " TCP acknowledgment number"
4694                                                   " at the same time");
4695         }
4696         return ret;
4697 }
4698
4699 /**
4700  * Validate the modify-header TTL actions.
4701  *
4702  * @param[in] action_flags
4703  *   Holds the actions detected until now.
4704  * @param[in] action
4705  *   Pointer to the modify action.
4706  * @param[in] item_flags
4707  *   Holds the items detected.
4708  * @param[out] error
4709  *   Pointer to error structure.
4710  *
4711  * @return
4712  *   0 on success, a negative errno value otherwise and rte_errno is set.
4713  */
4714 static int
4715 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4716                                    const struct rte_flow_action *action,
4717                                    const uint64_t item_flags,
4718                                    struct rte_flow_error *error)
4719 {
4720         int ret = 0;
4721         uint64_t layer;
4722
4723         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4724         if (!ret) {
4725                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4726                                  MLX5_FLOW_LAYER_INNER_L3 :
4727                                  MLX5_FLOW_LAYER_OUTER_L3;
4728                 if (!(item_flags & layer))
4729                         return rte_flow_error_set(error, EINVAL,
4730                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4731                                                   NULL,
4732                                                   "no IP protocol in pattern");
4733         }
4734         return ret;
4735 }
4736
4737 /**
4738  * Validate the generic modify field actions.
4739  * @param[in] dev
4740  *   Pointer to the rte_eth_dev structure.
4741  * @param[in] action_flags
4742  *   Holds the actions detected until now.
4743  * @param[in] action
4744  *   Pointer to the modify action.
4745  * @param[in] attr
4746  *   Pointer to the flow attributes.
4747  * @param[out] error
4748  *   Pointer to error structure.
4749  *
4750  * @return
4751  *   Number of header fields to modify (0 or more) on success,
4752  *   a negative errno value otherwise and rte_errno is set.
4753  */
4754 static int
4755 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4756                                    const uint64_t action_flags,
4757                                    const struct rte_flow_action *action,
4758                                    const struct rte_flow_attr *attr,
4759                                    struct rte_flow_error *error)
4760 {
4761         int ret = 0;
4762         struct mlx5_priv *priv = dev->data->dev_private;
4763         struct mlx5_dev_config *config = &priv->config;
4764         const struct rte_flow_action_modify_field *action_modify_field =
4765                 action->conf;
4766         uint32_t dst_width = mlx5_flow_item_field_width(config,
4767                                 action_modify_field->dst.field);
4768         uint32_t src_width = mlx5_flow_item_field_width(config,
4769                                 action_modify_field->src.field);
4770
4771         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4772         if (ret)
4773                 return ret;
4774
4775         if (action_modify_field->width == 0)
4776                 return rte_flow_error_set(error, EINVAL,
4777                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4778                                 "no bits are requested to be modified");
4779         else if (action_modify_field->width > dst_width ||
4780                  action_modify_field->width > src_width)
4781                 return rte_flow_error_set(error, EINVAL,
4782                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4783                                 "cannot modify more bits than"
4784                                 " the width of a field");
4785         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4786             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4787                 if ((action_modify_field->dst.offset +
4788                      action_modify_field->width > dst_width) ||
4789                     (action_modify_field->dst.offset % 32))
4790                         return rte_flow_error_set(error, EINVAL,
4791                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4792                                         "destination offset is too big"
4793                                         " or not aligned to 4 bytes");
4794                 if (action_modify_field->dst.level &&
4795                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4796                         return rte_flow_error_set(error, ENOTSUP,
4797                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4798                                         "inner header fields modification"
4799                                         " is not supported");
4800         }
4801         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4802             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4803                 if (!attr->transfer && !attr->group)
4804                         return rte_flow_error_set(error, ENOTSUP,
4805                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4806                                         "modify field action is not"
4807                                         " supported for group 0");
4808                 if ((action_modify_field->src.offset +
4809                      action_modify_field->width > src_width) ||
4810                     (action_modify_field->src.offset % 32))
4811                         return rte_flow_error_set(error, EINVAL,
4812                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4813                                         "source offset is too big"
4814                                         " or not aligned to 4 bytes");
4815                 if (action_modify_field->src.level &&
4816                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4817                         return rte_flow_error_set(error, ENOTSUP,
4818                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4819                                         "inner header fields modification"
4820                                         " is not supported");
4821         }
4822         if ((action_modify_field->dst.field ==
4823              action_modify_field->src.field) &&
4824             (action_modify_field->dst.level ==
4825              action_modify_field->src.level))
4826                 return rte_flow_error_set(error, EINVAL,
4827                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4828                                 "source and destination fields"
4829                                 " cannot be the same");
4830         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4831             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4832                 return rte_flow_error_set(error, EINVAL,
4833                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4834                                 "immediate value or a pointer to it"
4835                                 " cannot be used as a destination");
4836         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4837             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4838                 return rte_flow_error_set(error, ENOTSUP,
4839                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4840                                 "modifications of an arbitrary"
4841                                 " place in a packet is not supported");
4842         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4843             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4844                 return rte_flow_error_set(error, ENOTSUP,
4845                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4846                                 "modifications of the 802.1Q Tag"
4847                                 " Identifier is not supported");
4848         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4849             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4850                 return rte_flow_error_set(error, ENOTSUP,
4851                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4852                                 "modifications of the VXLAN Network"
4853                                 " Identifier is not supported");
4854         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4855             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4856                 return rte_flow_error_set(error, ENOTSUP,
4857                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4858                                 "modifications of the GENEVE Network"
4859                                 " Identifier is not supported");
4860         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4861             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4862             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4863             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4864                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4865                     !mlx5_flow_ext_mreg_supported(dev))
4866                         return rte_flow_error_set(error, ENOTSUP,
4867                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4868                                         "cannot modify mark or metadata without"
4869                                         " extended metadata register support");
4870         }
4871         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4872                 return rte_flow_error_set(error, ENOTSUP,
4873                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4874                                 "add and sub operations"
4875                                 " are not supported");
4876         return (action_modify_field->width / 32) +
4877                !!(action_modify_field->width % 32);
4878 }
4879
4880 /**
4881  * Validate jump action.
4882  *
4883  * @param[in] action
4884  *   Pointer to the jump action.
4885  * @param[in] action_flags
4886  *   Holds the actions detected until now.
4887  * @param[in] attributes
4888  *   Pointer to flow attributes
4889  * @param[in] external
4890  *   Action belongs to flow rule created by request external to PMD.
4891  * @param[out] error
4892  *   Pointer to error structure.
4893  *
4894  * @return
4895  *   0 on success, a negative errno value otherwise and rte_errno is set.
4896  */
4897 static int
4898 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4899                              const struct mlx5_flow_tunnel *tunnel,
4900                              const struct rte_flow_action *action,
4901                              uint64_t action_flags,
4902                              const struct rte_flow_attr *attributes,
4903                              bool external, struct rte_flow_error *error)
4904 {
4905         uint32_t target_group, table;
4906         int ret = 0;
4907         struct flow_grp_info grp_info = {
4908                 .external = !!external,
4909                 .transfer = !!attributes->transfer,
4910                 .fdb_def_rule = 1,
4911                 .std_tbl_fix = 0
4912         };
4913         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4914                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4915                 return rte_flow_error_set(error, EINVAL,
4916                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4917                                           "can't have 2 fate actions in"
4918                                           " same flow");
4919         if (!action->conf)
4920                 return rte_flow_error_set(error, EINVAL,
4921                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4922                                           NULL, "action configuration not set");
4923         target_group =
4924                 ((const struct rte_flow_action_jump *)action->conf)->group;
4925         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4926                                        &grp_info, error);
4927         if (ret)
4928                 return ret;
4929         if (attributes->group == target_group &&
4930             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4931                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4932                 return rte_flow_error_set(error, EINVAL,
4933                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4934                                           "target group must be other than"
4935                                           " the current flow group");
4936         return 0;
4937 }
4938
4939 /*
4940  * Validate the port_id action.
4941  *
4942  * @param[in] dev
4943  *   Pointer to rte_eth_dev structure.
4944  * @param[in] action_flags
4945  *   Bit-fields that holds the actions detected until now.
4946  * @param[in] action
4947  *   Port_id RTE action structure.
4948  * @param[in] attr
4949  *   Attributes of flow that includes this action.
4950  * @param[out] error
4951  *   Pointer to error structure.
4952  *
4953  * @return
4954  *   0 on success, a negative errno value otherwise and rte_errno is set.
4955  */
4956 static int
4957 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4958                                 uint64_t action_flags,
4959                                 const struct rte_flow_action *action,
4960                                 const struct rte_flow_attr *attr,
4961                                 struct rte_flow_error *error)
4962 {
4963         const struct rte_flow_action_port_id *port_id;
4964         struct mlx5_priv *act_priv;
4965         struct mlx5_priv *dev_priv;
4966         uint16_t port;
4967
4968         if (!attr->transfer)
4969                 return rte_flow_error_set(error, ENOTSUP,
4970                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4971                                           NULL,
4972                                           "port id action is valid in transfer"
4973                                           " mode only");
4974         if (!action || !action->conf)
4975                 return rte_flow_error_set(error, ENOTSUP,
4976                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4977                                           NULL,
4978                                           "port id action parameters must be"
4979                                           " specified");
4980         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4981                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4982                 return rte_flow_error_set(error, EINVAL,
4983                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4984                                           "can have only one fate actions in"
4985                                           " a flow");
4986         dev_priv = mlx5_dev_to_eswitch_info(dev);
4987         if (!dev_priv)
4988                 return rte_flow_error_set(error, rte_errno,
4989                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4990                                           NULL,
4991                                           "failed to obtain E-Switch info");
4992         port_id = action->conf;
4993         port = port_id->original ? dev->data->port_id : port_id->id;
4994         act_priv = mlx5_port_to_eswitch_info(port, false);
4995         if (!act_priv)
4996                 return rte_flow_error_set
4997                                 (error, rte_errno,
4998                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4999                                  "failed to obtain E-Switch port id for port");
5000         if (act_priv->domain_id != dev_priv->domain_id)
5001                 return rte_flow_error_set
5002                                 (error, EINVAL,
5003                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5004                                  "port does not belong to"
5005                                  " E-Switch being configured");
5006         return 0;
5007 }
5008
5009 /**
5010  * Get the maximum number of modify header actions.
5011  *
5012  * @param dev
5013  *   Pointer to rte_eth_dev structure.
5014  * @param flags
5015  *   Flags bits to check if root level.
5016  *
5017  * @return
5018  *   Max number of modify header actions device can support.
5019  */
5020 static inline unsigned int
5021 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5022                               uint64_t flags)
5023 {
5024         /*
5025          * There's no way to directly query the max capacity from FW.
5026          * The maximal value on root table should be assumed to be supported.
5027          */
5028         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
5029                 return MLX5_MAX_MODIFY_NUM;
5030         else
5031                 return MLX5_ROOT_TBL_MODIFY_NUM;
5032 }
5033
/**
 * Validate the meter action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] action
 *   Pointer to the meter action.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] def_policy
 *   Set to true if the meter uses the default policy, false if it has
 *   a user-created policy.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
				uint64_t action_flags,
				const struct rte_flow_action *action,
				const struct rte_flow_attr *attr,
				bool *def_policy,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_meter *am = action->conf;
	struct mlx5_flow_meter_info *fm;
	struct mlx5_flow_meter_policy *mtr_policy;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;

	if (!am)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter action conf is NULL");

	/* Only one meter per rule; chaining meters is not supported. */
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter chaining not support");
	if (action_flags & MLX5_FLOW_ACTION_JUMP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter with jump not support");
	if (!priv->mtr_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "meter action not supported");
	fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
	if (!fm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Meter not found");
	/* aso meter can always be shared by different domains */
	if (fm->ref_cnt && !priv->sh->meter_aso_en &&
	    !(fm->transfer == attr->transfer ||
	      (!fm->ingress && !attr->ingress && attr->egress) ||
	      (!fm->egress && !attr->egress && attr->ingress)))
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Flow attributes domain are either invalid "
			"or have a domain conflict with current "
			"meter attributes");
	if (fm->def_policy) {
		/*
		 * Default-policy meter: the matching default policy table
		 * must exist for at least one domain of the flow attributes.
		 */
		if (!((attr->transfer &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
			(attr->egress &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
			(attr->ingress &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes domain "
					  "have a conflict with current "
					  "meter domain attributes");
		*def_policy = true;
	} else {
		/* User policy: it must cover a domain of the flow. */
		mtr_policy = mlx5_flow_meter_policy_find(dev,
						fm->policy_id, NULL);
		if (!mtr_policy)
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Invalid policy id for meter ");
		if (!((attr->transfer && mtr_policy->transfer) ||
			(attr->egress && mtr_policy->egress) ||
			(attr->ingress && mtr_policy->ingress)))
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes domain "
					  "have a conflict with current "
					  "meter domain attributes");
		*def_policy = false;
	}
	return 0;
}
5130
5131 /**
5132  * Validate the age action.
5133  *
5134  * @param[in] action_flags
5135  *   Holds the actions detected until now.
5136  * @param[in] action
5137  *   Pointer to the age action.
5138  * @param[in] dev
5139  *   Pointer to the Ethernet device structure.
5140  * @param[out] error
5141  *   Pointer to error structure.
5142  *
5143  * @return
5144  *   0 on success, a negative errno value otherwise and rte_errno is set.
5145  */
5146 static int
5147 flow_dv_validate_action_age(uint64_t action_flags,
5148                             const struct rte_flow_action *action,
5149                             struct rte_eth_dev *dev,
5150                             struct rte_flow_error *error)
5151 {
5152         struct mlx5_priv *priv = dev->data->dev_private;
5153         const struct rte_flow_action_age *age = action->conf;
5154
5155         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5156             !priv->sh->aso_age_mng))
5157                 return rte_flow_error_set(error, ENOTSUP,
5158                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5159                                           NULL,
5160                                           "age action not supported");
5161         if (!(action->conf))
5162                 return rte_flow_error_set(error, EINVAL,
5163                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5164                                           "configuration cannot be null");
5165         if (!(age->timeout))
5166                 return rte_flow_error_set(error, EINVAL,
5167                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5168                                           "invalid timeout value 0");
5169         if (action_flags & MLX5_FLOW_ACTION_AGE)
5170                 return rte_flow_error_set(error, EINVAL,
5171                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5172                                           "duplicate age actions set");
5173         return 0;
5174 }
5175
5176 /**
5177  * Validate the modify-header IPv4 DSCP actions.
5178  *
5179  * @param[in] action_flags
5180  *   Holds the actions detected until now.
5181  * @param[in] action
5182  *   Pointer to the modify action.
5183  * @param[in] item_flags
5184  *   Holds the items detected.
5185  * @param[out] error
5186  *   Pointer to error structure.
5187  *
5188  * @return
5189  *   0 on success, a negative errno value otherwise and rte_errno is set.
5190  */
5191 static int
5192 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5193                                          const struct rte_flow_action *action,
5194                                          const uint64_t item_flags,
5195                                          struct rte_flow_error *error)
5196 {
5197         int ret = 0;
5198
5199         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5200         if (!ret) {
5201                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5202                         return rte_flow_error_set(error, EINVAL,
5203                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5204                                                   NULL,
5205                                                   "no ipv4 item in pattern");
5206         }
5207         return ret;
5208 }
5209
5210 /**
5211  * Validate the modify-header IPv6 DSCP actions.
5212  *
5213  * @param[in] action_flags
5214  *   Holds the actions detected until now.
5215  * @param[in] action
5216  *   Pointer to the modify action.
5217  * @param[in] item_flags
5218  *   Holds the items detected.
5219  * @param[out] error
5220  *   Pointer to error structure.
5221  *
5222  * @return
5223  *   0 on success, a negative errno value otherwise and rte_errno is set.
5224  */
5225 static int
5226 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5227                                          const struct rte_flow_action *action,
5228                                          const uint64_t item_flags,
5229                                          struct rte_flow_error *error)
5230 {
5231         int ret = 0;
5232
5233         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5234         if (!ret) {
5235                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5236                         return rte_flow_error_set(error, EINVAL,
5237                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5238                                                   NULL,
5239                                                   "no ipv6 item in pattern");
5240         }
5241         return ret;
5242 }
5243
/**
 * Match modify-header resource.
 *
 * @param list
 *   Pointer to the hash list.
 * @param entry
 *   Pointer to exist resource entry object.
 * @param key
 *   Key of the new entry.
 * @param cb_ctx
 *   Pointer to the context holding the new modify-header resource.
 *
 * @return
 *   0 on matching, non-zero otherwise.
 */
int
flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
			struct mlx5_hlist_entry *entry,
			uint64_t key __rte_unused, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
	struct mlx5_flow_dv_modify_hdr_resource *resource =
			container_of(entry, typeof(*resource), entry);
	/*
	 * Comparison key spans the structure from the ft_type field to its
	 * end, plus the trailing array of modification actions, so both
	 * resources can be compared with a single memcmp().
	 */
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);

	key_len += ref->actions_num * sizeof(ref->actions[0]);
	/* Different action counts cannot match - avoids over-reading. */
	return ref->actions_num != resource->actions_num ||
	       memcmp(&ref->ft_type, &resource->ft_type, key_len);
}
5274
/**
 * Allocate and create a modify-header resource (hash list create callback).
 *
 * @param list
 *   Pointer to the hash list; its ctx field holds the shared device context.
 * @param key
 *   Key of the new entry (unused, the data comes from cb_ctx).
 * @param cb_ctx
 *   Pointer to the context holding the reference resource to copy.
 *
 * @return
 *   Pointer to the new hash list entry on success, NULL otherwise and
 *   the error object in cb_ctx is set.
 */
struct mlx5_hlist_entry *
flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5dv_dr_domain *ns;
	struct mlx5_flow_dv_modify_hdr_resource *entry;
	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
	int ret;
	/* Trailing modification actions stored right after the structure. */
	uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
	/* Key region: from the ft_type field to the end of the structure. */
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);

	entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
			    SOCKET_ID_ANY);
	if (!entry) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		return NULL;
	}
	/* Copy key region and the trailing actions in one shot. */
	rte_memcpy(&entry->ft_type,
		   RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
		   key_len + data_len);
	/* Select the steering domain matching the target table type. */
	if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_domain;
	else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_domain;
	else
		ns = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_action_modify_header
					(sh->ctx, ns, entry,
					 data_len, &entry->action);
	if (ret) {
		/* Creation failed - release the half-built entry. */
		mlx5_free(entry);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create modification action");
		return NULL;
	}
	return &entry->entry;
}
5317
5318 /**
5319  * Validate the sample action.
5320  *
5321  * @param[in, out] action_flags
5322  *   Holds the actions detected until now.
5323  * @param[in] action
5324  *   Pointer to the sample action.
5325  * @param[in] dev
5326  *   Pointer to the Ethernet device structure.
5327  * @param[in] attr
5328  *   Attributes of flow that includes this action.
5329  * @param[in] item_flags
5330  *   Holds the items detected.
5331  * @param[in] rss
5332  *   Pointer to the RSS action.
5333  * @param[out] sample_rss
5334  *   Pointer to the RSS action in sample action list.
5335  * @param[out] count
5336  *   Pointer to the COUNT action in sample action list.
5337  * @param[out] fdb_mirror_limit
5338  *   Pointer to the FDB mirror limitation flag.
5339  * @param[out] error
5340  *   Pointer to error structure.
5341  *
5342  * @return
5343  *   0 on success, a negative errno value otherwise and rte_errno is set.
5344  */
5345 static int
5346 flow_dv_validate_action_sample(uint64_t *action_flags,
5347                                const struct rte_flow_action *action,
5348                                struct rte_eth_dev *dev,
5349                                const struct rte_flow_attr *attr,
5350                                uint64_t item_flags,
5351                                const struct rte_flow_action_rss *rss,
5352                                const struct rte_flow_action_rss **sample_rss,
5353                                const struct rte_flow_action_count **count,
5354                                int *fdb_mirror_limit,
5355                                struct rte_flow_error *error)
5356 {
5357         struct mlx5_priv *priv = dev->data->dev_private;
5358         struct mlx5_dev_config *dev_conf = &priv->config;
5359         const struct rte_flow_action_sample *sample = action->conf;
5360         const struct rte_flow_action *act;
5361         uint64_t sub_action_flags = 0;
5362         uint16_t queue_index = 0xFFFF;
5363         int actions_n = 0;
5364         int ret;
5365
5366         if (!sample)
5367                 return rte_flow_error_set(error, EINVAL,
5368                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5369                                           "configuration cannot be NULL");
5370         if (sample->ratio == 0)
5371                 return rte_flow_error_set(error, EINVAL,
5372                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5373                                           "ratio value starts from 1");
5374         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5375                 return rte_flow_error_set(error, ENOTSUP,
5376                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5377                                           NULL,
5378                                           "sample action not supported");
5379         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5380                 return rte_flow_error_set(error, EINVAL,
5381                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5382                                           "Multiple sample actions not "
5383                                           "supported");
5384         if (*action_flags & MLX5_FLOW_ACTION_METER)
5385                 return rte_flow_error_set(error, EINVAL,
5386                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5387                                           "wrong action order, meter should "
5388                                           "be after sample action");
5389         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5390                 return rte_flow_error_set(error, EINVAL,
5391                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5392                                           "wrong action order, jump should "
5393                                           "be after sample action");
5394         act = sample->actions;
5395         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5396                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5397                         return rte_flow_error_set(error, ENOTSUP,
5398                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5399                                                   act, "too many actions");
5400                 switch (act->type) {
5401                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5402                         ret = mlx5_flow_validate_action_queue(act,
5403                                                               sub_action_flags,
5404                                                               dev,
5405                                                               attr, error);
5406                         if (ret < 0)
5407                                 return ret;
5408                         queue_index = ((const struct rte_flow_action_queue *)
5409                                                         (act->conf))->index;
5410                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5411                         ++actions_n;
5412                         break;
5413                 case RTE_FLOW_ACTION_TYPE_RSS:
5414                         *sample_rss = act->conf;
5415                         ret = mlx5_flow_validate_action_rss(act,
5416                                                             sub_action_flags,
5417                                                             dev, attr,
5418                                                             item_flags,
5419                                                             error);
5420                         if (ret < 0)
5421                                 return ret;
5422                         if (rss && *sample_rss &&
5423                             ((*sample_rss)->level != rss->level ||
5424                             (*sample_rss)->types != rss->types))
5425                                 return rte_flow_error_set(error, ENOTSUP,
5426                                         RTE_FLOW_ERROR_TYPE_ACTION,
5427                                         NULL,
5428                                         "Can't use the different RSS types "
5429                                         "or level in the same flow");
5430                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5431                                 queue_index = (*sample_rss)->queue[0];
5432                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5433                         ++actions_n;
5434                         break;
5435                 case RTE_FLOW_ACTION_TYPE_MARK:
5436                         ret = flow_dv_validate_action_mark(dev, act,
5437                                                            sub_action_flags,
5438                                                            attr, error);
5439                         if (ret < 0)
5440                                 return ret;
5441                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5442                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5443                                                 MLX5_FLOW_ACTION_MARK_EXT;
5444                         else
5445                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5446                         ++actions_n;
5447                         break;
5448                 case RTE_FLOW_ACTION_TYPE_COUNT:
5449                         ret = flow_dv_validate_action_count
5450                                 (dev, is_shared_action_count(act),
5451                                  *action_flags | sub_action_flags,
5452                                  error);
5453                         if (ret < 0)
5454                                 return ret;
5455                         *count = act->conf;
5456                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5457                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5458                         ++actions_n;
5459                         break;
5460                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5461                         ret = flow_dv_validate_action_port_id(dev,
5462                                                               sub_action_flags,
5463                                                               act,
5464                                                               attr,
5465                                                               error);
5466                         if (ret)
5467                                 return ret;
5468                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5469                         ++actions_n;
5470                         break;
5471                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5472                         ret = flow_dv_validate_action_raw_encap_decap
5473                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5474                                  &actions_n, action, item_flags, error);
5475                         if (ret < 0)
5476                                 return ret;
5477                         ++actions_n;
5478                         break;
5479                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5480                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5481                         ret = flow_dv_validate_action_l2_encap(dev,
5482                                                                sub_action_flags,
5483                                                                act, attr,
5484                                                                error);
5485                         if (ret < 0)
5486                                 return ret;
5487                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5488                         ++actions_n;
5489                         break;
5490                 default:
5491                         return rte_flow_error_set(error, ENOTSUP,
5492                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5493                                                   NULL,
5494                                                   "Doesn't support optional "
5495                                                   "action");
5496                 }
5497         }
5498         if (attr->ingress && !attr->transfer) {
5499                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5500                                           MLX5_FLOW_ACTION_RSS)))
5501                         return rte_flow_error_set(error, EINVAL,
5502                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5503                                                   NULL,
5504                                                   "Ingress must has a dest "
5505                                                   "QUEUE for Sample");
5506         } else if (attr->egress && !attr->transfer) {
5507                 return rte_flow_error_set(error, ENOTSUP,
5508                                           RTE_FLOW_ERROR_TYPE_ACTION,
5509                                           NULL,
5510                                           "Sample Only support Ingress "
5511                                           "or E-Switch");
5512         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5513                 MLX5_ASSERT(attr->transfer);
5514                 if (sample->ratio > 1)
5515                         return rte_flow_error_set(error, ENOTSUP,
5516                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5517                                                   NULL,
5518                                                   "E-Switch doesn't support "
5519                                                   "any optional action "
5520                                                   "for sampling");
5521                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5522                         return rte_flow_error_set(error, ENOTSUP,
5523                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5524                                                   NULL,
5525                                                   "unsupported action QUEUE");
5526                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5527                         return rte_flow_error_set(error, ENOTSUP,
5528                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5529                                                   NULL,
5530                                                   "unsupported action QUEUE");
5531                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5532                         return rte_flow_error_set(error, EINVAL,
5533                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5534                                                   NULL,
5535                                                   "E-Switch must has a dest "
5536                                                   "port for mirroring");
5537                 if (!priv->config.hca_attr.reg_c_preserve &&
5538                      priv->representor_id != -1)
5539                         *fdb_mirror_limit = 1;
5540         }
5541         /* Continue validation for Xcap actions.*/
5542         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5543             (queue_index == 0xFFFF ||
5544              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5545                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5546                      MLX5_FLOW_XCAP_ACTIONS)
5547                         return rte_flow_error_set(error, ENOTSUP,
5548                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5549                                                   NULL, "encap and decap "
5550                                                   "combination aren't "
5551                                                   "supported");
5552                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5553                                                         MLX5_FLOW_ACTION_ENCAP))
5554                         return rte_flow_error_set(error, ENOTSUP,
5555                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5556                                                   NULL, "encap is not supported"
5557                                                   " for ingress traffic");
5558         }
5559         return 0;
5560 }
5561
5562 /**
5563  * Find existing modify-header resource or create and register a new one.
5564  *
5565  * @param dev[in, out]
5566  *   Pointer to rte_eth_dev structure.
5567  * @param[in, out] resource
5568  *   Pointer to modify-header resource.
5569  * @parm[in, out] dev_flow
5570  *   Pointer to the dev_flow.
5571  * @param[out] error
5572  *   pointer to error structure.
5573  *
5574  * @return
5575  *   0 on success otherwise -errno and errno is set.
5576  */
5577 static int
5578 flow_dv_modify_hdr_resource_register
5579                         (struct rte_eth_dev *dev,
5580                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5581                          struct mlx5_flow *dev_flow,
5582                          struct rte_flow_error *error)
5583 {
5584         struct mlx5_priv *priv = dev->data->dev_private;
5585         struct mlx5_dev_ctx_shared *sh = priv->sh;
5586         uint32_t key_len = sizeof(*resource) -
5587                            offsetof(typeof(*resource), ft_type) +
5588                            resource->actions_num * sizeof(resource->actions[0]);
5589         struct mlx5_hlist_entry *entry;
5590         struct mlx5_flow_cb_ctx ctx = {
5591                 .error = error,
5592                 .data = resource,
5593         };
5594         uint64_t key64;
5595
5596         resource->flags = dev_flow->dv.group ? 0 :
5597                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5598         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5599                                     resource->flags))
5600                 return rte_flow_error_set(error, EOVERFLOW,
5601                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5602                                           "too many modify header items");
5603         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5604         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5605         if (!entry)
5606                 return -rte_errno;
5607         resource = container_of(entry, typeof(*resource), entry);
5608         dev_flow->handle->dvh.modify_hdr = resource;
5609         return 0;
5610 }
5611
5612 /**
5613  * Get DV flow counter by index.
5614  *
5615  * @param[in] dev
5616  *   Pointer to the Ethernet device structure.
5617  * @param[in] idx
5618  *   mlx5 flow counter index in the container.
5619  * @param[out] ppool
5620  *   mlx5 flow counter pool in the container.
5621  *
5622  * @return
5623  *   Pointer to the counter, NULL otherwise.
5624  */
5625 static struct mlx5_flow_counter *
5626 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5627                            uint32_t idx,
5628                            struct mlx5_flow_counter_pool **ppool)
5629 {
5630         struct mlx5_priv *priv = dev->data->dev_private;
5631         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5632         struct mlx5_flow_counter_pool *pool;
5633
5634         /* Decrease to original index and clear shared bit. */
5635         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5636         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5637         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5638         MLX5_ASSERT(pool);
5639         if (ppool)
5640                 *ppool = pool;
5641         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5642 }
5643
5644 /**
5645  * Check the devx counter belongs to the pool.
5646  *
5647  * @param[in] pool
5648  *   Pointer to the counter pool.
5649  * @param[in] id
5650  *   The counter devx ID.
5651  *
5652  * @return
5653  *   True if counter belongs to the pool, false otherwise.
5654  */
5655 static bool
5656 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5657 {
5658         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5659                    MLX5_COUNTERS_PER_POOL;
5660
5661         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5662                 return true;
5663         return false;
5664 }
5665
5666 /**
5667  * Get a pool by devx counter ID.
5668  *
5669  * @param[in] cmng
5670  *   Pointer to the counter management.
5671  * @param[in] id
5672  *   The counter devx ID.
5673  *
5674  * @return
5675  *   The counter pool pointer if exists, NULL otherwise,
5676  */
5677 static struct mlx5_flow_counter_pool *
5678 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5679 {
5680         uint32_t i;
5681         struct mlx5_flow_counter_pool *pool = NULL;
5682
5683         rte_spinlock_lock(&cmng->pool_update_sl);
5684         /* Check last used pool. */
5685         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5686             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5687                 pool = cmng->pools[cmng->last_pool_idx];
5688                 goto out;
5689         }
5690         /* ID out of range means no suitable pool in the container. */
5691         if (id > cmng->max_id || id < cmng->min_id)
5692                 goto out;
5693         /*
5694          * Find the pool from the end of the container, since mostly counter
5695          * ID is sequence increasing, and the last pool should be the needed
5696          * one.
5697          */
5698         i = cmng->n_valid;
5699         while (i--) {
5700                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5701
5702                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5703                         pool = pool_tmp;
5704                         break;
5705                 }
5706         }
5707 out:
5708         rte_spinlock_unlock(&cmng->pool_update_sl);
5709         return pool;
5710 }
5711
5712 /**
5713  * Resize a counter container.
5714  *
5715  * @param[in] dev
5716  *   Pointer to the Ethernet device structure.
5717  *
5718  * @return
5719  *   0 on success, otherwise negative errno value and rte_errno is set.
5720  */
5721 static int
5722 flow_dv_container_resize(struct rte_eth_dev *dev)
5723 {
5724         struct mlx5_priv *priv = dev->data->dev_private;
5725         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5726         void *old_pools = cmng->pools;
5727         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5728         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5729         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5730
5731         if (!pools) {
5732                 rte_errno = ENOMEM;
5733                 return -ENOMEM;
5734         }
5735         if (old_pools)
5736                 memcpy(pools, old_pools, cmng->n *
5737                                        sizeof(struct mlx5_flow_counter_pool *));
5738         cmng->n = resize;
5739         cmng->pools = pools;
5740         if (old_pools)
5741                 mlx5_free(old_pools);
5742         return 0;
5743 }
5744
5745 /**
5746  * Query a devx flow counter.
5747  *
5748  * @param[in] dev
5749  *   Pointer to the Ethernet device structure.
5750  * @param[in] counter
5751  *   Index to the flow counter.
5752  * @param[out] pkts
5753  *   The statistics value of packets.
5754  * @param[out] bytes
5755  *   The statistics value of bytes.
5756  *
5757  * @return
5758  *   0 on success, otherwise a negative errno value and rte_errno is set.
5759  */
5760 static inline int
5761 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5762                      uint64_t *bytes)
5763 {
5764         struct mlx5_priv *priv = dev->data->dev_private;
5765         struct mlx5_flow_counter_pool *pool = NULL;
5766         struct mlx5_flow_counter *cnt;
5767         int offset;
5768
5769         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5770         MLX5_ASSERT(pool);
5771         if (priv->sh->cmng.counter_fallback)
5772                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5773                                         0, pkts, bytes, 0, NULL, NULL, 0);
5774         rte_spinlock_lock(&pool->sl);
5775         if (!pool->raw) {
5776                 *pkts = 0;
5777                 *bytes = 0;
5778         } else {
5779                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5780                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5781                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5782         }
5783         rte_spinlock_unlock(&pool->sl);
5784         return 0;
5785 }
5786
5787 /**
5788  * Create and initialize a new counter pool.
5789  *
5790  * @param[in] dev
5791  *   Pointer to the Ethernet device structure.
5792  * @param[out] dcs
5793  *   The devX counter handle.
5794  * @param[in] age
5795  *   Whether the pool is for counter that was allocated for aging.
5796  * @param[in/out] cont_cur
5797  *   Pointer to the container pointer, it will be update in pool resize.
5798  *
5799  * @return
5800  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
5801  */
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t size = sizeof(*pool);

	/*
	 * The counters (and the optional per-counter age parameters) are
	 * laid out in the same allocation right after the pool structure.
	 */
	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->raw = NULL;
	pool->is_aged = !!age;
	pool->query_gen = 0;
	pool->min_dcs = dcs;
	rte_spinlock_init(&pool->sl);
	rte_spinlock_init(&pool->csl);
	TAILQ_INIT(&pool->counters[0]);
	TAILQ_INIT(&pool->counters[1]);
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool->index = cmng->n_valid;
	/*
	 * Grow the container array when it is full. On resize failure the
	 * new pool is released and the container is left untouched.
	 */
	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
		mlx5_free(pool);
		rte_spinlock_unlock(&cmng->pool_update_sl);
		return NULL;
	}
	cmng->pools[pool->index] = pool;
	cmng->n_valid++;
	if (unlikely(fallback)) {
		/*
		 * Track the devx counter ID range covered by the pools so
		 * flow_dv_find_pool_by_id() can reject out-of-range IDs
		 * without scanning.
		 */
		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);

		if (base < cmng->min_id)
			cmng->min_id = base;
		if (base > cmng->max_id)
			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
		cmng->last_pool_idx = pool->index;
	}
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
5849
5850 /**
5851  * Prepare a new counter and/or a new counter pool.
5852  *
5853  * @param[in] dev
5854  *   Pointer to the Ethernet device structure.
5855  * @param[out] cnt_free
5856  *   Where to put the pointer of a new counter.
5857  * @param[in] age
5858  *   Whether the pool is for counter that was allocated for aging.
5859  *
5860  * @return
5861  *   The counter pool pointer and @p cnt_free is set on success,
5862  *   NULL otherwise and rte_errno is set.
5863  */
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_counters tmp_tq;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t i;

	if (fallback) {
		/* bulk_bitmap must be 0 for single counter allocation. */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
		if (!dcs)
			return NULL;
		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
		if (!pool) {
			/* No pool covers this devx ID yet - create one. */
			pool = flow_dv_pool_create(dev, dcs, age);
			if (!pool) {
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		}
		/* The slot inside the pool is fixed by the devx ID. */
		i = dcs->id % MLX5_COUNTERS_PER_POOL;
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		/* Keep the devx object so the free path can recycle it. */
		cnt->dcs_when_free = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/* Bulk allocation (bitmap 0x4) backing a whole pool of counters. */
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, age);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/*
	 * Counter 0 is handed straight to the caller; counters 1..N-1 are
	 * collected on a temporary list and spliced into the global free
	 * list of this counter type under its lock.
	 */
	TAILQ_INIT(&tmp_tq);
	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
	}
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
	(*cnt_free)->pool = pool;
	return pool;
}
5923
5924 /**
5925  * Allocate a flow counter.
5926  *
5927  * @param[in] dev
5928  *   Pointer to the Ethernet device structure.
5929  * @param[in] age
5930  *   Whether the counter was allocated for aging.
5931  *
5932  * @return
5933  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5934  */
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	bool fallback = priv->sh->cmng.counter_fallback;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	uint32_t cnt_idx;

	/* Flow counters are backed by DevX objects. */
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get free counters from container. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
	if (cnt_free)
		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Free list exhausted - allocate a new pool/devx counter. */
	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
		goto err;
	pool = cnt_free->pool;
	if (fallback)
		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only in the first time usage. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;
		int ret;

		if (!fallback) {
			/* Bulk mode: index into the pool's shared devx object. */
			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
			dcs = pool->min_dcs;
		} else {
			/* Fallback mode: one devx object per counter. */
			offset = 0;
			dcs = cnt_free->dcs_when_free;
		}
		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
							    &cnt_free->action);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
	/* Update the counter reset values. */
	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
				 &cnt_free->bytes))
		goto err;
	if (!fallback && !priv->sh->cmng.query_thread_on)
		/* Start the asynchronous batch query by the host thread. */
		mlx5_set_query_alarm(priv->sh);
	/*
	 * When the count action isn't shared (by ID), shared_info field is
	 * used for indirect action API's refcnt.
	 * When the counter action is not shared neither by ID nor by indirect
	 * action API, shared info must be 1.
	 */
	cnt_free->shared_info.refcnt = 1;
	return cnt_idx;
err:
	/* Put the counter back on the free list so it is not leaked. */
	if (cnt_free) {
		cnt_free->pool = pool;
		if (fallback)
			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
		rte_spinlock_lock(&cmng->csl[cnt_type]);
		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
		rte_spinlock_unlock(&cmng->csl[cnt_type]);
	}
	return 0;
}
6010
6011 /**
6012  * Allocate a shared flow counter.
6013  *
6014  * @param[in] ctx
6015  *   Pointer to the shared counter configuration.
6016  * @param[in] data
6017  *   Pointer to save the allocated counter index.
6018  *
6019  * @return
6020  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6021  */
6022
6023 static int32_t
6024 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
6025 {
6026         struct mlx5_shared_counter_conf *conf = ctx;
6027         struct rte_eth_dev *dev = conf->dev;
6028         struct mlx5_flow_counter *cnt;
6029
6030         data->dword = flow_dv_counter_alloc(dev, 0);
6031         data->dword |= MLX5_CNT_SHARED_OFFSET;
6032         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6033         cnt->shared_info.id = conf->id;
6034         return 0;
6035 }
6036
6037 /**
6038  * Get a shared flow counter.
6039  *
6040  * @param[in] dev
6041  *   Pointer to the Ethernet device structure.
6042  * @param[in] id
6043  *   Counter identifier.
6044  *
6045  * @return
6046  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6047  */
6048 static uint32_t
6049 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6050 {
6051         struct mlx5_priv *priv = dev->data->dev_private;
6052         struct mlx5_shared_counter_conf conf = {
6053                 .dev = dev,
6054                 .id = id,
6055         };
6056         union mlx5_l3t_data data = {
6057                 .dword = 0,
6058         };
6059
6060         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6061                                flow_dv_counter_alloc_shared_cb, &conf);
6062         return data.dword;
6063 }
6064
6065 /**
6066  * Get age param from counter index.
6067  *
6068  * @param[in] dev
6069  *   Pointer to the Ethernet device structure.
6070  * @param[in] counter
6071  *   Index to the counter handler.
6072  *
6073  * @return
6074  *   The aging parameter specified for the counter index.
6075  */
6076 static struct mlx5_age_param*
6077 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6078                                 uint32_t counter)
6079 {
6080         struct mlx5_flow_counter *cnt;
6081         struct mlx5_flow_counter_pool *pool = NULL;
6082
6083         flow_dv_counter_get_by_idx(dev, counter, &pool);
6084         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6085         cnt = MLX5_POOL_GET_CNT(pool, counter);
6086         return MLX5_CNT_TO_AGE(cnt);
6087 }
6088
6089 /**
6090  * Remove a flow counter from aged counter list.
6091  *
6092  * @param[in] dev
6093  *   Pointer to the Ethernet device structure.
6094  * @param[in] counter
6095  *   Index to the counter handler.
6096  * @param[in] cnt
6097  *   Pointer to the counter handler.
6098  */
static void
flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
				uint32_t counter, struct mlx5_flow_counter *cnt)
{
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t expected = AGE_CANDIDATE;

	age_info = GET_PORT_AGE_INFO(priv);
	age_param = flow_dv_counter_idx_get_age(dev, counter);
	/*
	 * If the counter is still a plain aging candidate, atomically mark
	 * it free and we are done. Otherwise it has already aged out and is
	 * assumed to sit on the aged-counters list, so unlink it there.
	 */
	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
					 AGE_FREE, false, __ATOMIC_RELAXED,
					 __ATOMIC_RELAXED)) {
		/**
		 * Take the lock even on age timeout, since the counter may
		 * still be being processed by the aging handler.
		 */
		rte_spinlock_lock(&age_info->aged_sl);
		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
		rte_spinlock_unlock(&age_info->aged_sl);
		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
	}
}
6123
6124 /**
6125  * Release a flow counter.
6126  *
6127  * @param[in] dev
6128  *   Pointer to the Ethernet device structure.
6129  * @param[in] counter
6130  *   Index to the counter handler.
6131  */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type;

	/* Index 0 is the invalid counter - nothing to release. */
	if (!counter)
		return;
	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	if (pool->is_aged) {
		/* Detach from the aging machinery before recycling. */
		flow_dv_counter_remove_from_age(dev, counter, cnt);
	} else {
		/*
		 * If the counter action is shared by ID, the l3t_clear_entry
		 * function reduces its references counter. If after the
		 * reduction the action is still referenced, the function
		 * returns here and does not release it.
		 */
		if (IS_LEGACY_SHARED_CNT(counter) &&
		    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
					 cnt->shared_info.id))
			return;
		/*
		 * If the counter action is shared by indirect action API,
		 * the atomic function reduces its references counter.
		 * If after the reduction the action is still referenced, the
		 * function returns here and does not release it.
		 * When the counter action is not shared neither by ID nor by
		 * indirect action API, shared info is 1 before the reduction,
		 * so this condition is failed and function doesn't return here.
		 */
		if (!IS_LEGACY_SHARED_CNT(counter) &&
		    __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
				       __ATOMIC_RELAXED))
			return;
	}
	cnt->pool = pool;
	/*
	 * Put the counter back to list to be updated in none fallback mode.
	 * Currently, we are using two list alternately, while one is in query,
	 * add the freed counter to the other list based on the pool query_gen
	 * value. After query finishes, add counter the list to the global
	 * container counter list. The list changes while query starts. In
	 * this case, lock will not be needed as query callback and release
	 * function both operate with the different list.
	 */
	if (!priv->sh->cmng.counter_fallback) {
		rte_spinlock_lock(&pool->csl);
		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
		rte_spinlock_unlock(&pool->csl);
	} else {
		/* Hand the devx object back for the next allocation. */
		cnt->dcs_when_free = cnt->dcs_when_active;
		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
					   MLX5_COUNTER_TYPE_ORIGIN;
		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
				  cnt, next);
		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
	}
}
6195
6196 /**
6197  * Resize a meter id container.
6198  *
6199  * @param[in] dev
6200  *   Pointer to the Ethernet device structure.
6201  *
6202  * @return
6203  *   0 on success, otherwise negative errno value and rte_errno is set.
6204  */
6205 static int
6206 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6207 {
6208         struct mlx5_priv *priv = dev->data->dev_private;
6209         struct mlx5_aso_mtr_pools_mng *pools_mng =
6210                                 &priv->sh->mtrmng->pools_mng;
6211         void *old_pools = pools_mng->pools;
6212         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6213         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6214         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6215
6216         if (!pools) {
6217                 rte_errno = ENOMEM;
6218                 return -ENOMEM;
6219         }
6220         if (!pools_mng->n)
6221                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6222                         mlx5_free(pools);
6223                         return -ENOMEM;
6224                 }
6225         if (old_pools)
6226                 memcpy(pools, old_pools, pools_mng->n *
6227                                        sizeof(struct mlx5_aso_mtr_pool *));
6228         pools_mng->n = resize;
6229         pools_mng->pools = pools;
6230         if (old_pools)
6231                 mlx5_free(old_pools);
6232         return 0;
6233 }
6234
6235 /**
6236  * Prepare a new meter and/or a new meter pool.
6237  *
6238  * @param[in] dev
6239  *   Pointer to the Ethernet device structure.
6240  * @param[out] mtr_free
6241  *   Where to put the pointer of a new meter.g.
6242  *
6243  * @return
6244  *   The meter pool pointer and @mtr_free is set on success,
6245  *   NULL otherwise and rte_errno is set.
6246  */
6247 static struct mlx5_aso_mtr_pool *
6248 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6249                              struct mlx5_aso_mtr **mtr_free)
6250 {
6251         struct mlx5_priv *priv = dev->data->dev_private;
6252         struct mlx5_aso_mtr_pools_mng *pools_mng =
6253                                 &priv->sh->mtrmng->pools_mng;
6254         struct mlx5_aso_mtr_pool *pool = NULL;
6255         struct mlx5_devx_obj *dcs = NULL;
6256         uint32_t i;
6257         uint32_t log_obj_size;
6258
6259         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6260         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6261                         priv->sh->pdn, log_obj_size);
6262         if (!dcs) {
6263                 rte_errno = ENODATA;
6264                 return NULL;
6265         }
6266         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6267         if (!pool) {
6268                 rte_errno = ENOMEM;
6269                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6270                 return NULL;
6271         }
6272         pool->devx_obj = dcs;
6273         pool->index = pools_mng->n_valid;
6274         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6275                 mlx5_free(pool);
6276                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6277                 return NULL;
6278         }
6279         pools_mng->pools[pool->index] = pool;
6280         pools_mng->n_valid++;
6281         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6282                 pool->mtrs[i].offset = i;
6283                 LIST_INSERT_HEAD(&pools_mng->meters,
6284                                                 &pool->mtrs[i], next);
6285         }
6286         pool->mtrs[0].offset = 0;
6287         *mtr_free = &pool->mtrs[0];
6288         return pool;
6289 }
6290
6291 /**
6292  * Release a flow meter into pool.
6293  *
6294  * @param[in] dev
6295  *   Pointer to the Ethernet device structure.
6296  * @param[in] mtr_idx
6297  *   Index to aso flow meter.
6298  */
6299 static void
6300 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6301 {
6302         struct mlx5_priv *priv = dev->data->dev_private;
6303         struct mlx5_aso_mtr_pools_mng *pools_mng =
6304                                 &priv->sh->mtrmng->pools_mng;
6305         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6306
6307         MLX5_ASSERT(aso_mtr);
6308         rte_spinlock_lock(&pools_mng->mtrsl);
6309         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6310         aso_mtr->state = ASO_METER_FREE;
6311         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6312         rte_spinlock_unlock(&pools_mng->mtrsl);
6313 }
6314
/**
 * Allocate an ASO flow meter.
 *
 * Pops a meter from the shared free list, creating a new pool when the
 * list is empty, and lazily creates the DR ASO meter action on first use.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Index to ASO flow meter on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_mtr_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr *mtr_free = NULL;
	struct mlx5_aso_mtr_pools_mng *pools_mng =
				&priv->sh->mtrmng->pools_mng;
	struct mlx5_aso_mtr_pool *pool;
	uint32_t mtr_idx = 0;

	/* ASO flow meters require DevX support. */
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Allocate the flow meter memory. */
	/* Get free meters from management. */
	rte_spinlock_lock(&pools_mng->mtrsl);
	mtr_free = LIST_FIRST(&pools_mng->meters);
	if (mtr_free)
		LIST_REMOVE(mtr_free, next);
	/* Free list empty: create a new pool (still under the lock). */
	if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
		rte_spinlock_unlock(&pools_mng->mtrsl);
		return 0;
	}
	mtr_free->state = ASO_METER_WAIT;
	rte_spinlock_unlock(&pools_mng->mtrsl);
	/* Recover the owning pool from the meter's offset inside it. */
	pool = container_of(mtr_free,
			struct mlx5_aso_mtr_pool,
			mtrs[mtr_free->offset]);
	mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
	/* Lazily create the DR ASO action on first use of this meter. */
	if (!mtr_free->fm.meter_action) {
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
		struct rte_flow_error error;
		uint8_t reg_id;

		reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
		mtr_free->fm.meter_action =
			mlx5_glue->dv_create_flow_action_aso
						(priv->sh->rx_domain,
						 pool->devx_obj->obj,
						 mtr_free->offset,
						 (1 << MLX5_FLOW_COLOR_GREEN),
						 reg_id - REG_C_0);
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
		/* No DR ASO support compiled in, or creation failed. */
		if (!mtr_free->fm.meter_action) {
			flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
			return 0;
		}
	}
	return mtr_idx;
}
6375
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Tunnel offload context, NULL when the rule is not tunnel-offloaded.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[in] grp_info
 *   Group translation info used to map the rule group to a table.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 0 on success and non root table.
 *   - 1 on success and root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct mlx5_flow_tunnel *tunnel,
			    const struct rte_flow_attr *attributes,
			    const struct flow_grp_info *grp_info,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
	int ret = 0;

#ifndef HAVE_MLX5DV_DR
	RTE_SET_USED(tunnel);
	RTE_SET_USED(grp_info);
	/* Without DR support only the root table (group 0) exists. */
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#else
	uint32_t table = 0;

	/* Translate the rule group into a driver table index. */
	ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
				       grp_info, error);
	if (ret)
		return ret;
	/* Table 0 is the root table; signal it through the return value. */
	if (!table)
		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
#endif
	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
	    attributes->priority > lowest_priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer) {
		/* Transfer (E-Switch) rules carry extra constraints. */
		if (!priv->config.dv_esw_en)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (!(priv->representor || priv->master))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "E-Switch configuration can only be"
				 " done by a master or a representor device");
		if (attributes->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
	}
	/* Exactly one of ingress/egress must be set. */
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return ret;
}
6453
6454 static uint16_t
6455 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6456                           const struct rte_flow_item *end)
6457 {
6458         const struct rte_flow_item *item = *head;
6459         uint16_t l3_protocol;
6460
6461         for (; item != end; item++) {
6462                 switch (item->type) {
6463                 default:
6464                         break;
6465                 case RTE_FLOW_ITEM_TYPE_IPV4:
6466                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6467                         goto l3_ok;
6468                 case RTE_FLOW_ITEM_TYPE_IPV6:
6469                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6470                         goto l3_ok;
6471                 case RTE_FLOW_ITEM_TYPE_ETH:
6472                         if (item->mask && item->spec) {
6473                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6474                                                             type, item,
6475                                                             l3_protocol);
6476                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6477                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6478                                         goto l3_ok;
6479                         }
6480                         break;
6481                 case RTE_FLOW_ITEM_TYPE_VLAN:
6482                         if (item->mask && item->spec) {
6483                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6484                                                             inner_type, item,
6485                                                             l3_protocol);
6486                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6487                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6488                                         goto l3_ok;
6489                         }
6490                         break;
6491                 }
6492         }
6493         return 0;
6494 l3_ok:
6495         *head = item;
6496         return l3_protocol;
6497 }
6498
6499 static uint8_t
6500 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6501                           const struct rte_flow_item *end)
6502 {
6503         const struct rte_flow_item *item = *head;
6504         uint8_t l4_protocol;
6505
6506         for (; item != end; item++) {
6507                 switch (item->type) {
6508                 default:
6509                         break;
6510                 case RTE_FLOW_ITEM_TYPE_TCP:
6511                         l4_protocol = IPPROTO_TCP;
6512                         goto l4_ok;
6513                 case RTE_FLOW_ITEM_TYPE_UDP:
6514                         l4_protocol = IPPROTO_UDP;
6515                         goto l4_ok;
6516                 case RTE_FLOW_ITEM_TYPE_IPV4:
6517                         if (item->mask && item->spec) {
6518                                 const struct rte_flow_item_ipv4 *mask, *spec;
6519
6520                                 mask = (typeof(mask))item->mask;
6521                                 spec = (typeof(spec))item->spec;
6522                                 l4_protocol = mask->hdr.next_proto_id &
6523                                               spec->hdr.next_proto_id;
6524                                 if (l4_protocol == IPPROTO_TCP ||
6525                                     l4_protocol == IPPROTO_UDP)
6526                                         goto l4_ok;
6527                         }
6528                         break;
6529                 case RTE_FLOW_ITEM_TYPE_IPV6:
6530                         if (item->mask && item->spec) {
6531                                 const struct rte_flow_item_ipv6 *mask, *spec;
6532                                 mask = (typeof(mask))item->mask;
6533                                 spec = (typeof(spec))item->spec;
6534                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6535                                 if (l4_protocol == IPPROTO_TCP ||
6536                                     l4_protocol == IPPROTO_UDP)
6537                                         goto l4_ok;
6538                         }
6539                         break;
6540                 }
6541         }
6542         return 0;
6543 l4_ok:
6544         *head = item;
6545         return l4_protocol;
6546 }
6547
6548 static int
6549 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6550                                 const struct rte_flow_item *rule_items,
6551                                 const struct rte_flow_item *integrity_item,
6552                                 struct rte_flow_error *error)
6553 {
6554         struct mlx5_priv *priv = dev->data->dev_private;
6555         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6556         const struct rte_flow_item_integrity *mask = (typeof(mask))
6557                                                      integrity_item->mask;
6558         const struct rte_flow_item_integrity *spec = (typeof(spec))
6559                                                      integrity_item->spec;
6560         uint32_t protocol;
6561
6562         if (!priv->config.hca_attr.pkt_integrity_match)
6563                 return rte_flow_error_set(error, ENOTSUP,
6564                                           RTE_FLOW_ERROR_TYPE_ITEM,
6565                                           integrity_item,
6566                                           "packet integrity integrity_item not supported");
6567         if (!mask)
6568                 mask = &rte_flow_item_integrity_mask;
6569         if (!mlx5_validate_integrity_item(mask))
6570                 return rte_flow_error_set(error, ENOTSUP,
6571                                           RTE_FLOW_ERROR_TYPE_ITEM,
6572                                           integrity_item,
6573                                           "unsupported integrity filter");
6574         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
6575         if (spec->level > 1) {
6576                 if (!tunnel_item)
6577                         return rte_flow_error_set(error, ENOTSUP,
6578                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6579                                                   integrity_item,
6580                                                   "missing tunnel item");
6581                 item = tunnel_item;
6582                 end_item = mlx5_find_end_item(tunnel_item);
6583         } else {
6584                 end_item = tunnel_item ? tunnel_item :
6585                            mlx5_find_end_item(integrity_item);
6586         }
6587         if (mask->l3_ok || mask->ipv4_csum_ok) {
6588                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6589                 if (!protocol)
6590                         return rte_flow_error_set(error, EINVAL,
6591                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6592                                                   integrity_item,
6593                                                   "missing L3 protocol");
6594         }
6595         if (mask->l4_ok || mask->l4_csum_ok) {
6596                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6597                 if (!protocol)
6598                         return rte_flow_error_set(error, EINVAL,
6599                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6600                                                   integrity_item,
6601                                                   "missing L4 protocol");
6602         }
6603         return 0;
6604 }
6605
6606 /**
6607  * Internal validation function. For validating both actions and items.
6608  *
6609  * @param[in] dev
6610  *   Pointer to the rte_eth_dev structure.
6611  * @param[in] attr
6612  *   Pointer to the flow attributes.
6613  * @param[in] items
6614  *   Pointer to the list of items.
6615  * @param[in] actions
6616  *   Pointer to the list of actions.
6617  * @param[in] external
6618  *   This flow rule is created by request external to PMD.
6619  * @param[in] hairpin
6620  *   Number of hairpin TX actions, 0 means classic flow.
6621  * @param[out] error
6622  *   Pointer to the error structure.
6623  *
6624  * @return
6625  *   0 on success, a negative errno value otherwise and rte_errno is set.
6626  */
6627 static int
6628 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6629                  const struct rte_flow_item items[],
6630                  const struct rte_flow_action actions[],
6631                  bool external, int hairpin, struct rte_flow_error *error)
6632 {
6633         int ret;
6634         uint64_t action_flags = 0;
6635         uint64_t item_flags = 0;
6636         uint64_t last_item = 0;
6637         uint8_t next_protocol = 0xff;
6638         uint16_t ether_type = 0;
6639         int actions_n = 0;
6640         uint8_t item_ipv6_proto = 0;
6641         int fdb_mirror_limit = 0;
6642         int modify_after_mirror = 0;
6643         const struct rte_flow_item *geneve_item = NULL;
6644         const struct rte_flow_item *gre_item = NULL;
6645         const struct rte_flow_item *gtp_item = NULL;
6646         const struct rte_flow_action_raw_decap *decap;
6647         const struct rte_flow_action_raw_encap *encap;
6648         const struct rte_flow_action_rss *rss = NULL;
6649         const struct rte_flow_action_rss *sample_rss = NULL;
6650         const struct rte_flow_action_count *sample_count = NULL;
6651         const struct rte_flow_item_tcp nic_tcp_mask = {
6652                 .hdr = {
6653                         .tcp_flags = 0xFF,
6654                         .src_port = RTE_BE16(UINT16_MAX),
6655                         .dst_port = RTE_BE16(UINT16_MAX),
6656                 }
6657         };
6658         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6659                 .hdr = {
6660                         .src_addr =
6661                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6662                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6663                         .dst_addr =
6664                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6665                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6666                         .vtc_flow = RTE_BE32(0xffffffff),
6667                         .proto = 0xff,
6668                         .hop_limits = 0xff,
6669                 },
6670                 .has_frag_ext = 1,
6671         };
6672         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6673                 .hdr = {
6674                         .common = {
6675                                 .u32 =
6676                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6677                                         .type = 0xFF,
6678                                         }).u32),
6679                         },
6680                         .dummy[0] = 0xffffffff,
6681                 },
6682         };
6683         struct mlx5_priv *priv = dev->data->dev_private;
6684         struct mlx5_dev_config *dev_conf = &priv->config;
6685         uint16_t queue_index = 0xFFFF;
6686         const struct rte_flow_item_vlan *vlan_m = NULL;
6687         uint32_t rw_act_num = 0;
6688         uint64_t is_root;
6689         const struct mlx5_flow_tunnel *tunnel;
6690         enum mlx5_tof_rule_type tof_rule_type;
6691         struct flow_grp_info grp_info = {
6692                 .external = !!external,
6693                 .transfer = !!attr->transfer,
6694                 .fdb_def_rule = !!priv->fdb_def_rule,
6695                 .std_tbl_fix = true,
6696         };
6697         const struct rte_eth_hairpin_conf *conf;
6698         const struct rte_flow_item *rule_items = items;
6699         bool def_policy = false;
6700
6701         if (items == NULL)
6702                 return -1;
6703         tunnel = is_tunnel_offload_active(dev) ?
6704                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6705         if (tunnel) {
6706                 if (priv->representor)
6707                         return rte_flow_error_set
6708                                 (error, ENOTSUP,
6709                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6710                                  NULL, "decap not supported for VF representor");
6711                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6712                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6713                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6714                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6715                                         MLX5_FLOW_ACTION_DECAP;
6716                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6717                                         (dev, attr, tunnel, tof_rule_type);
6718         }
6719         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6720         if (ret < 0)
6721                 return ret;
6722         is_root = (uint64_t)ret;
6723         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6724                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6725                 int type = items->type;
6726
6727                 if (!mlx5_flow_os_item_supported(type))
6728                         return rte_flow_error_set(error, ENOTSUP,
6729                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6730                                                   NULL, "item not supported");
6731                 switch (type) {
6732                 case RTE_FLOW_ITEM_TYPE_VOID:
6733                         break;
6734                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6735                         ret = flow_dv_validate_item_port_id
6736                                         (dev, items, attr, item_flags, error);
6737                         if (ret < 0)
6738                                 return ret;
6739                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6740                         break;
6741                 case RTE_FLOW_ITEM_TYPE_ETH:
6742                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6743                                                           true, error);
6744                         if (ret < 0)
6745                                 return ret;
6746                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6747                                              MLX5_FLOW_LAYER_OUTER_L2;
6748                         if (items->mask != NULL && items->spec != NULL) {
6749                                 ether_type =
6750                                         ((const struct rte_flow_item_eth *)
6751                                          items->spec)->type;
6752                                 ether_type &=
6753                                         ((const struct rte_flow_item_eth *)
6754                                          items->mask)->type;
6755                                 ether_type = rte_be_to_cpu_16(ether_type);
6756                         } else {
6757                                 ether_type = 0;
6758                         }
6759                         break;
6760                 case RTE_FLOW_ITEM_TYPE_VLAN:
6761                         ret = flow_dv_validate_item_vlan(items, item_flags,
6762                                                          dev, error);
6763                         if (ret < 0)
6764                                 return ret;
6765                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6766                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6767                         if (items->mask != NULL && items->spec != NULL) {
6768                                 ether_type =
6769                                         ((const struct rte_flow_item_vlan *)
6770                                          items->spec)->inner_type;
6771                                 ether_type &=
6772                                         ((const struct rte_flow_item_vlan *)
6773                                          items->mask)->inner_type;
6774                                 ether_type = rte_be_to_cpu_16(ether_type);
6775                         } else {
6776                                 ether_type = 0;
6777                         }
6778                         /* Store outer VLAN mask for of_push_vlan action. */
6779                         if (!tunnel)
6780                                 vlan_m = items->mask;
6781                         break;
6782                 case RTE_FLOW_ITEM_TYPE_IPV4:
6783                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6784                                                   &item_flags, &tunnel);
6785                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6786                                                          last_item, ether_type,
6787                                                          error);
6788                         if (ret < 0)
6789                                 return ret;
6790                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6791                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6792                         if (items->mask != NULL &&
6793                             ((const struct rte_flow_item_ipv4 *)
6794                              items->mask)->hdr.next_proto_id) {
6795                                 next_protocol =
6796                                         ((const struct rte_flow_item_ipv4 *)
6797                                          (items->spec))->hdr.next_proto_id;
6798                                 next_protocol &=
6799                                         ((const struct rte_flow_item_ipv4 *)
6800                                          (items->mask))->hdr.next_proto_id;
6801                         } else {
6802                                 /* Reset for inner layer. */
6803                                 next_protocol = 0xff;
6804                         }
6805                         break;
6806                 case RTE_FLOW_ITEM_TYPE_IPV6:
6807                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6808                                                   &item_flags, &tunnel);
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type,
							   &nic_ipv6_mask,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			/*
			 * Track the IPv6 next-header protocol so the following
			 * L4 item (TCP/UDP/ICMP) can be validated against it.
			 * NOTE(review): items->spec is dereferenced without a
			 * NULL check when the mask selects hdr.proto -
			 * presumably the item validation above guarantees a
			 * spec in that case; TODO confirm.
			 */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				item_ipv6_proto =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
			ret = flow_dv_validate_item_ipv6_frag_ext(items,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ?
					MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
					MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
			/* The fragment extension carries the next protocol. */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6_frag_ext *)
			     items->mask)->hdr.next_header) {
				next_protocol =
				((const struct rte_flow_item_ipv6_frag_ext *)
				 items->spec)->hdr.next_header;
				next_protocol &=
				((const struct rte_flow_item_ipv6_frag_ext *)
				 items->mask)->hdr.next_header;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &nic_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			/* Remember the GRE item for GRE_KEY validation. */
			gre_item = items;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ret = mlx5_flow_validate_item_nvgre(items, item_flags,
							    next_protocol,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_NVGRE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
			/* Validated against the previously seen GRE item. */
			ret = mlx5_flow_validate_item_gre_key
				(items, item_flags, gre_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE_KEY;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags, dev,
								error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			ret = mlx5_flow_validate_item_geneve(items,
							     item_flags, dev,
							     error);
			if (ret < 0)
				return ret;
			/* Remember the GENEVE item for GENEVE_OPT checks. */
			geneve_item = items;
			last_item = MLX5_FLOW_LAYER_GENEVE;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
			ret = mlx5_flow_validate_item_geneve_opt(items,
								 last_item,
								 geneve_item,
								 dev,
								 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;

		case RTE_FLOW_ITEM_TYPE_MARK:
			ret = flow_dv_validate_item_mark(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_MARK;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			ret = mlx5_flow_validate_item_icmp(items, item_flags,
							   next_protocol,
							   error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_ICMP;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			ret = mlx5_flow_validate_item_icmp6(items, item_flags,
							    next_protocol,
							    error);
			if (ret < 0)
				return ret;
			/*
			 * Remember ICMPv6 so IPv6 address rewrite actions can
			 * be rejected later (see SET_IPV6_SRC/DST handling in
			 * the action loop below).
			 */
			item_ipv6_proto = IPPROTO_ICMPV6;
			last_item = MLX5_FLOW_LAYER_ICMP6;
			break;
		case RTE_FLOW_ITEM_TYPE_TAG:
			ret = flow_dv_validate_item_tag(dev, items,
							attr, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_TAG;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
			/* PMD-internal items, accepted without extra checks. */
			break;
		case RTE_FLOW_ITEM_TYPE_GTP:
			ret = flow_dv_validate_item_gtp(dev, items, item_flags,
							error);
			if (ret < 0)
				return ret;
			/* Remember the GTP item for GTP_PSC validation. */
			gtp_item = items;
			last_item = MLX5_FLOW_LAYER_GTP;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			ret = flow_dv_validate_item_gtp_psc(items, last_item,
							    gtp_item, attr,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GTP_PSC;
			break;
		case RTE_FLOW_ITEM_TYPE_ECPRI:
			/* Capacity will be checked in the translate stage. */
			ret = mlx5_flow_validate_item_ecpri(items, item_flags,
							    last_item,
							    ether_type,
							    &nic_ecpri_mask,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_ECPRI;
			break;
		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
			/* At most one integrity item per rule. */
			if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
				return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ITEM,
					 NULL, "multiple integrity items not supported");
			ret = flow_dv_validate_item_integrity(dev, rule_items,
							      items, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_INTEGRITY;
			break;
		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
			ret = flow_dv_validate_item_aso_ct(dev, items,
							   &item_flags, error);
			if (ret < 0)
				return ret;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
			/* tunnel offload item was processed before
			 * list it here as a supported type
			 */
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		/* Accumulate the layer bit for checks on later items/actions. */
		item_flags |= last_item;
	}
	/* Validate the action list against the matched items. */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int type = actions->type;
		bool shared_count = false;

		if (!mlx5_flow_os_action_supported(type))
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		/* A meter with a terminating policy must end the list. */
		if (action_flags &
			MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
			return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "meter action with policy "
				"must be the last action");
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			ret = flow_dv_validate_action_port_id(dev,
							      action_flags,
							      actions,
							      attr,
							      error);
			if (ret)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = flow_dv_validate_action_flag(dev, action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			/*
			 * In extended metadata mode FLAG is implemented via a
			 * modify-header action; in legacy mode it is a plain
			 * standalone action.
			 */
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
				/* Count all modify-header actions as one. */
				if (!(action_flags &
				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
					++actions_n;
				action_flags |= MLX5_FLOW_ACTION_FLAG |
						MLX5_FLOW_ACTION_MARK_EXT;
				if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
					modify_after_mirror = 1;

			} else {
				action_flags |= MLX5_FLOW_ACTION_FLAG;
				++actions_n;
			}
			rw_act_num += MLX5_ACT_NUM_SET_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = flow_dv_validate_action_mark(dev, actions,
							   action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			/* Same split as FLAG: extended vs. legacy metadata. */
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
				/* Count all modify-header actions as one. */
				if (!(action_flags &
				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
					++actions_n;
				action_flags |= MLX5_FLOW_ACTION_MARK |
						MLX5_FLOW_ACTION_MARK_EXT;
				if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
					modify_after_mirror = 1;
			} else {
				action_flags |= MLX5_FLOW_ACTION_MARK;
				++actions_n;
			}
			rw_act_num += MLX5_ACT_NUM_SET_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_META:
			ret = flow_dv_validate_action_set_meta(dev, actions,
							       action_flags,
							       attr, error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_SET_META;
			rw_act_num += MLX5_ACT_NUM_SET_META;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TAG:
			ret = flow_dv_validate_action_set_tag(dev, actions,
							      action_flags,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
			rw_act_num += MLX5_ACT_NUM_SET_TAG;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Remember the target Rx queue for later checks. */
			queue_index = ((const struct rte_flow_action_queue *)
							(actions->conf))->index;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			rss = actions->conf;
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			/*
			 * If a sample action carried its own RSS, it must use
			 * the same level and types as the flow-level RSS.
			 */
			if (rss && sample_rss &&
			    (sample_rss->level != rss->level ||
			    sample_rss->types != rss->types))
				return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL,
					"Can't use the different RSS types "
					"or level in the same flow");
			if (rss != NULL && rss->queue_num)
				queue_index = rss->queue[0];
			action_flags |= MLX5_FLOW_ACTION_RSS;
			++actions_n;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
			ret =
			mlx5_flow_validate_action_default_miss(action_flags,
					attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
			++actions_n;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
		case RTE_FLOW_ACTION_TYPE_COUNT:
			shared_count = is_shared_action_count(actions);
			ret = flow_dv_validate_action_count(dev, shared_count,
							    action_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
			if (flow_dv_validate_action_pop_vlan(dev,
							     action_flags,
							     actions,
							     item_flags, attr,
							     error))
				return -rte_errno;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			ret = flow_dv_validate_action_push_vlan(dev,
								action_flags,
								vlan_m,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			ret = flow_dv_validate_action_set_vlan_pcp
						(action_flags, actions, error);
			if (ret < 0)
				return ret;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			/* Count PCP with push_vlan command. */
			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			ret = flow_dv_validate_action_set_vlan_vid
						(item_flags, action_flags,
						 actions, error);
			if (ret < 0)
				return ret;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			/* Count VID with push_vlan command. */
			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
			rw_act_num += MLX5_ACT_NUM_MDF_VID;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			ret = flow_dv_validate_action_l2_encap(dev,
							       action_flags,
							       actions, attr,
							       error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			ret = flow_dv_validate_action_decap(dev, action_flags,
							    actions, item_flags,
							    attr, error);
			if (ret < 0)
				return ret;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			ret = flow_dv_validate_action_raw_encap_decap
				(dev, NULL, actions->conf, attr, &action_flags,
				 &actions_n, actions, item_flags, error);
			if (ret < 0)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			decap = actions->conf;
			/*
			 * Look ahead past VOID actions: a RAW_DECAP directly
			 * followed by a RAW_ENCAP is validated as one
			 * combined decap/encap pair.
			 */
			while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
				;
			if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
				encap = NULL;
				/* Step back so the loop revisits this action. */
				actions--;
			} else {
				encap = actions->conf;
			}
			ret = flow_dv_validate_action_raw_encap_decap
					   (dev,
					    decap ? decap : &empty_decap, encap,
					    attr, &action_flags, &actions_n,
					    actions, item_flags, error);
			if (ret < 0)
				return ret;
			if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
			    (action_flags & MLX5_FLOW_ACTION_DECAP))
				modify_after_mirror = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			ret = flow_dv_validate_action_modify_mac(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
						MLX5_FLOW_ACTION_SET_MAC_SRC :
						MLX5_FLOW_ACTION_SET_MAC_DST;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			/*
			 * Even if the source and destination MAC addresses have
			 * overlap in the header with 4B alignment, the convert
			 * function will handle them separately and 4 SW actions
			 * will be created. And 2 actions will be added each
			 * time no matter how many bytes of address will be set.
			 */
			rw_act_num += MLX5_ACT_NUM_MDF_MAC;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			ret = flow_dv_validate_action_modify_ipv4(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
						MLX5_FLOW_ACTION_SET_IPV4_SRC :
						MLX5_FLOW_ACTION_SET_IPV4_DST;
			rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			ret = flow_dv_validate_action_modify_ipv6(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/*
			 * IPv6 address rewrite is refused when the flow matches
			 * ICMPv6 (item_ipv6_proto is set either by an ICMP6
			 * item or by an IPv6 item whose proto matched ICMPv6).
			 */
			if (item_ipv6_proto == IPPROTO_ICMPV6)
				return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions,
					"Can't change header "
					"with ICMPv6 proto");
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
						MLX5_FLOW_ACTION_SET_IPV6_SRC :
						MLX5_FLOW_ACTION_SET_IPV6_DST;
			rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			ret = flow_dv_validate_action_modify_tp(action_flags,
								actions,
								item_flags,
								error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
						MLX5_FLOW_ACTION_SET_TP_SRC :
						MLX5_FLOW_ACTION_SET_TP_DST;
7390                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7391                         break;
7392                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7393                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7394                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7395                                                                  actions,
7396                                                                  item_flags,
7397                                                                  error);
7398                         if (ret < 0)
7399                                 return ret;
7400                         /* Count all modify-header actions as one action. */
7401                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7402                                 ++actions_n;
7403                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7404                                 modify_after_mirror = 1;
7405                         action_flags |= actions->type ==
7406                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7407                                                 MLX5_FLOW_ACTION_SET_TTL :
7408                                                 MLX5_FLOW_ACTION_DEC_TTL;
7409                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7410                         break;
7411                 case RTE_FLOW_ACTION_TYPE_JUMP:
7412                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7413                                                            action_flags,
7414                                                            attr, external,
7415                                                            error);
7416                         if (ret)
7417                                 return ret;
7418                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7419                             fdb_mirror_limit)
7420                                 return rte_flow_error_set(error, EINVAL,
7421                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7422                                                   NULL,
7423                                                   "sample and jump action combination is not supported");
7424                         ++actions_n;
7425                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7426                         break;
7427                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7428                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7429                         ret = flow_dv_validate_action_modify_tcp_seq
7430                                                                 (action_flags,
7431                                                                  actions,
7432                                                                  item_flags,
7433                                                                  error);
7434                         if (ret < 0)
7435                                 return ret;
7436                         /* Count all modify-header actions as one action. */
7437                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7438                                 ++actions_n;
7439                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7440                                 modify_after_mirror = 1;
7441                         action_flags |= actions->type ==
7442                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7443                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7444                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7445                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7446                         break;
7447                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7448                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7449                         ret = flow_dv_validate_action_modify_tcp_ack
7450                                                                 (action_flags,
7451                                                                  actions,
7452                                                                  item_flags,
7453                                                                  error);
7454                         if (ret < 0)
7455                                 return ret;
7456                         /* Count all modify-header actions as one action. */
7457                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7458                                 ++actions_n;
7459                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7460                                 modify_after_mirror = 1;
7461                         action_flags |= actions->type ==
7462                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7463                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7464                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7465                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7466                         break;
7467                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7468                         break;
7469                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7470                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7471                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7472                         break;
7473                 case RTE_FLOW_ACTION_TYPE_METER:
7474                         ret = mlx5_flow_validate_action_meter(dev,
7475                                                               action_flags,
7476                                                               actions, attr,
7477                                                               &def_policy,
7478                                                               error);
7479                         if (ret < 0)
7480                                 return ret;
7481                         action_flags |= MLX5_FLOW_ACTION_METER;
7482                         if (!def_policy)
7483                                 action_flags |=
7484                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7485                         ++actions_n;
7486                         /* Meter action will add one more TAG action. */
7487                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7488                         break;
7489                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7490                         if (!attr->transfer && !attr->group)
7491                                 return rte_flow_error_set(error, ENOTSUP,
7492                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7493                                                                            NULL,
7494                           "Shared ASO age action is not supported for group 0");
7495                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7496                                 return rte_flow_error_set
7497                                                   (error, EINVAL,
7498                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7499                                                    NULL,
7500                                                    "duplicate age actions set");
7501                         action_flags |= MLX5_FLOW_ACTION_AGE;
7502                         ++actions_n;
7503                         break;
7504                 case RTE_FLOW_ACTION_TYPE_AGE:
7505                         ret = flow_dv_validate_action_age(action_flags,
7506                                                           actions, dev,
7507                                                           error);
7508                         if (ret < 0)
7509                                 return ret;
7510                         /*
7511                          * Validate the regular AGE action (using counter)
7512                          * mutual exclusion with share counter actions.
7513                          */
7514                         if (!priv->sh->flow_hit_aso_en) {
7515                                 if (shared_count)
7516                                         return rte_flow_error_set
7517                                                 (error, EINVAL,
7518                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7519                                                 NULL,
7520                                                 "old age and shared count combination is not supported");
7521                                 if (sample_count)
7522                                         return rte_flow_error_set
7523                                                 (error, EINVAL,
7524                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7525                                                 NULL,
7526                                                 "old age action and count must be in the same sub flow");
7527                         }
7528                         action_flags |= MLX5_FLOW_ACTION_AGE;
7529                         ++actions_n;
7530                         break;
7531                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7532                         ret = flow_dv_validate_action_modify_ipv4_dscp
7533                                                          (action_flags,
7534                                                           actions,
7535                                                           item_flags,
7536                                                           error);
7537                         if (ret < 0)
7538                                 return ret;
7539                         /* Count all modify-header actions as one action. */
7540                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7541                                 ++actions_n;
7542                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7543                                 modify_after_mirror = 1;
7544                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7545                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7546                         break;
7547                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7548                         ret = flow_dv_validate_action_modify_ipv6_dscp
7549                                                                 (action_flags,
7550                                                                  actions,
7551                                                                  item_flags,
7552                                                                  error);
7553                         if (ret < 0)
7554                                 return ret;
7555                         /* Count all modify-header actions as one action. */
7556                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7557                                 ++actions_n;
7558                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7559                                 modify_after_mirror = 1;
7560                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7561                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7562                         break;
7563                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7564                         ret = flow_dv_validate_action_sample(&action_flags,
7565                                                              actions, dev,
7566                                                              attr, item_flags,
7567                                                              rss, &sample_rss,
7568                                                              &sample_count,
7569                                                              &fdb_mirror_limit,
7570                                                              error);
7571                         if (ret < 0)
7572                                 return ret;
7573                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7574                         ++actions_n;
7575                         break;
7576                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7577                         ret = flow_dv_validate_action_modify_field(dev,
7578                                                                    action_flags,
7579                                                                    actions,
7580                                                                    attr,
7581                                                                    error);
7582                         if (ret < 0)
7583                                 return ret;
7584                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7585                                 modify_after_mirror = 1;
7586                         /* Count all modify-header actions as one action. */
7587                         if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
7588                                 ++actions_n;
7589                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7590                         rw_act_num += ret;
7591                         break;
7592                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7593                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7594                                                              item_flags, attr,
7595                                                              error);
7596                         if (ret < 0)
7597                                 return ret;
7598                         action_flags |= MLX5_FLOW_ACTION_CT;
7599                         break;
7600                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7601                         /* tunnel offload action was processed before
7602                          * list it here as a supported type
7603                          */
7604                         break;
7605                 default:
7606                         return rte_flow_error_set(error, ENOTSUP,
7607                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7608                                                   actions,
7609                                                   "action not supported");
7610                 }
7611         }
7612         /*
7613          * Validate actions in flow rules
7614          * - Explicit decap action is prohibited by the tunnel offload API.
7615          * - Drop action in tunnel steer rule is prohibited by the API.
7616          * - Application cannot use MARK action because it's value can mask
7617          *   tunnel default miss nitification.
7618          * - JUMP in tunnel match rule has no support in current PMD
7619          *   implementation.
7620          * - TAG & META are reserved for future uses.
7621          */
7622         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7623                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7624                                             MLX5_FLOW_ACTION_MARK     |
7625                                             MLX5_FLOW_ACTION_SET_TAG  |
7626                                             MLX5_FLOW_ACTION_SET_META |
7627                                             MLX5_FLOW_ACTION_DROP;
7628
7629                 if (action_flags & bad_actions_mask)
7630                         return rte_flow_error_set
7631                                         (error, EINVAL,
7632                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7633                                         "Invalid RTE action in tunnel "
7634                                         "set decap rule");
7635                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7636                         return rte_flow_error_set
7637                                         (error, EINVAL,
7638                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7639                                         "tunnel set decap rule must terminate "
7640                                         "with JUMP");
7641                 if (!attr->ingress)
7642                         return rte_flow_error_set
7643                                         (error, EINVAL,
7644                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7645                                         "tunnel flows for ingress traffic only");
7646         }
7647         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7648                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7649                                             MLX5_FLOW_ACTION_MARK    |
7650                                             MLX5_FLOW_ACTION_SET_TAG |
7651                                             MLX5_FLOW_ACTION_SET_META;
7652
7653                 if (action_flags & bad_actions_mask)
7654                         return rte_flow_error_set
7655                                         (error, EINVAL,
7656                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7657                                         "Invalid RTE action in tunnel "
7658                                         "set match rule");
7659         }
7660         /*
7661          * Validate the drop action mutual exclusion with other actions.
7662          * Drop action is mutually-exclusive with any other action, except for
7663          * Count action.
7664          * Drop action compatibility with tunnel offload was already validated.
7665          */
7666         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7667                             MLX5_FLOW_ACTION_TUNNEL_MATCH));
7668         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7669             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7670                 return rte_flow_error_set(error, EINVAL,
7671                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7672                                           "Drop action is mutually-exclusive "
7673                                           "with any other action, except for "
7674                                           "Count action");
7675         /* Eswitch has few restrictions on using items and actions */
7676         if (attr->transfer) {
7677                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7678                     action_flags & MLX5_FLOW_ACTION_FLAG)
7679                         return rte_flow_error_set(error, ENOTSUP,
7680                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7681                                                   NULL,
7682                                                   "unsupported action FLAG");
7683                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7684                     action_flags & MLX5_FLOW_ACTION_MARK)
7685                         return rte_flow_error_set(error, ENOTSUP,
7686                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7687                                                   NULL,
7688                                                   "unsupported action MARK");
7689                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7690                         return rte_flow_error_set(error, ENOTSUP,
7691                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7692                                                   NULL,
7693                                                   "unsupported action QUEUE");
7694                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7695                         return rte_flow_error_set(error, ENOTSUP,
7696                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7697                                                   NULL,
7698                                                   "unsupported action RSS");
7699                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7700                         return rte_flow_error_set(error, EINVAL,
7701                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7702                                                   actions,
7703                                                   "no fate action is found");
7704         } else {
7705                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7706                         return rte_flow_error_set(error, EINVAL,
7707                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7708                                                   actions,
7709                                                   "no fate action is found");
7710         }
7711         /*
7712          * Continue validation for Xcap and VLAN actions.
7713          * If hairpin is working in explicit TX rule mode, there is no actions
7714          * splitting and the validation of hairpin ingress flow should be the
7715          * same as other standard flows.
7716          */
7717         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7718                              MLX5_FLOW_VLAN_ACTIONS)) &&
7719             (queue_index == 0xFFFF ||
7720              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7721              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7722              conf->tx_explicit != 0))) {
7723                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7724                     MLX5_FLOW_XCAP_ACTIONS)
7725                         return rte_flow_error_set(error, ENOTSUP,
7726                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7727                                                   NULL, "encap and decap "
7728                                                   "combination aren't supported");
7729                 if (!attr->transfer && attr->ingress) {
7730                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7731                                 return rte_flow_error_set
7732                                                 (error, ENOTSUP,
7733                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7734                                                  NULL, "encap is not supported"
7735                                                  " for ingress traffic");
7736                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7737                                 return rte_flow_error_set
7738                                                 (error, ENOTSUP,
7739                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7740                                                  NULL, "push VLAN action not "
7741                                                  "supported for ingress");
7742                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7743                                         MLX5_FLOW_VLAN_ACTIONS)
7744                                 return rte_flow_error_set
7745                                                 (error, ENOTSUP,
7746                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7747                                                  NULL, "no support for "
7748                                                  "multiple VLAN actions");
7749                 }
7750         }
7751         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7752                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7753                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7754                         attr->ingress)
7755                         return rte_flow_error_set
7756                                 (error, ENOTSUP,
7757                                 RTE_FLOW_ERROR_TYPE_ACTION,
7758                                 NULL, "fate action not supported for "
7759                                 "meter with policy");
7760                 if (attr->egress) {
7761                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7762                                 return rte_flow_error_set
7763                                         (error, ENOTSUP,
7764                                         RTE_FLOW_ERROR_TYPE_ACTION,
7765                                         NULL, "modify header action in egress "
7766                                         "cannot be done before meter action");
7767                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7768                                 return rte_flow_error_set
7769                                         (error, ENOTSUP,
7770                                         RTE_FLOW_ERROR_TYPE_ACTION,
7771                                         NULL, "encap action in egress "
7772                                         "cannot be done before meter action");
7773                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7774                                 return rte_flow_error_set
7775                                         (error, ENOTSUP,
7776                                         RTE_FLOW_ERROR_TYPE_ACTION,
7777                                         NULL, "push vlan action in egress "
7778                                         "cannot be done before meter action");
7779                 }
7780         }
7781         /*
7782          * Hairpin flow will add one more TAG action in TX implicit mode.
7783          * In TX explicit mode, there will be no hairpin flow ID.
7784          */
7785         if (hairpin > 0)
7786                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7787         /* extra metadata enabled: one more TAG action will be add. */
7788         if (dev_conf->dv_flow_en &&
7789             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7790             mlx5_flow_ext_mreg_supported(dev))
7791                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7792         if (rw_act_num >
7793                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7794                 return rte_flow_error_set(error, ENOTSUP,
7795                                           RTE_FLOW_ERROR_TYPE_ACTION,
7796                                           NULL, "too many header modify"
7797                                           " actions to support");
7798         }
7799         /* Eswitch egress mirror and modify flow has limitation on CX5 */
7800         if (fdb_mirror_limit && modify_after_mirror)
7801                 return rte_flow_error_set(error, EINVAL,
7802                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7803                                 "sample before modify action is not supported");
7804         return 0;
7805 }
7806
7807 /**
7808  * Internal preparation function. Allocates the DV flow size,
7809  * this size is constant.
7810  *
7811  * @param[in] dev
7812  *   Pointer to the rte_eth_dev structure.
7813  * @param[in] attr
7814  *   Pointer to the flow attributes.
7815  * @param[in] items
7816  *   Pointer to the list of items.
7817  * @param[in] actions
7818  *   Pointer to the list of actions.
7819  * @param[out] error
7820  *   Pointer to the error structure.
7821  *
7822  * @return
7823  *   Pointer to mlx5_flow object on success,
7824  *   otherwise NULL and rte_errno is set.
7825  */
7826 static struct mlx5_flow *
7827 flow_dv_prepare(struct rte_eth_dev *dev,
7828                 const struct rte_flow_attr *attr __rte_unused,
7829                 const struct rte_flow_item items[] __rte_unused,
7830                 const struct rte_flow_action actions[] __rte_unused,
7831                 struct rte_flow_error *error)
7832 {
7833         uint32_t handle_idx = 0;
7834         struct mlx5_flow *dev_flow;
7835         struct mlx5_flow_handle *dev_handle;
7836         struct mlx5_priv *priv = dev->data->dev_private;
7837         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7838
7839         MLX5_ASSERT(wks);
7840         wks->skip_matcher_reg = 0;
7841         /* In case of corrupting the memory. */
7842         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7843                 rte_flow_error_set(error, ENOSPC,
7844                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7845                                    "not free temporary device flow");
7846                 return NULL;
7847         }
7848         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7849                                    &handle_idx);
7850         if (!dev_handle) {
7851                 rte_flow_error_set(error, ENOMEM,
7852                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7853                                    "not enough memory to create flow handle");
7854                 return NULL;
7855         }
7856         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7857         dev_flow = &wks->flows[wks->flow_idx++];
7858         memset(dev_flow, 0, sizeof(*dev_flow));
7859         dev_flow->handle = dev_handle;
7860         dev_flow->handle_idx = handle_idx;
7861         /*
7862          * In some old rdma-core releases, before continuing, a check of the
7863          * length of matching parameter will be done at first. It needs to use
7864          * the length without misc4 param. If the flow has misc4 support, then
7865          * the length needs to be adjusted accordingly. Each param member is
7866          * aligned with a 64B boundary naturally.
7867          */
7868         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7869                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7870         dev_flow->ingress = attr->ingress;
7871         dev_flow->dv.transfer = attr->transfer;
7872         return dev_flow;
7873 }
7874
7875 #ifdef RTE_LIBRTE_MLX5_DEBUG
7876 /**
7877  * Sanity check for match mask and value. Similar to check_valid_spec() in
7878  * kernel driver. If unmasked bit is present in value, it returns failure.
7879  *
7880  * @param match_mask
7881  *   pointer to match mask buffer.
7882  * @param match_value
7883  *   pointer to match value buffer.
7884  *
7885  * @return
7886  *   0 if valid, -EINVAL otherwise.
7887  */
7888 static int
7889 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7890 {
7891         uint8_t *m = match_mask;
7892         uint8_t *v = match_value;
7893         unsigned int i;
7894
7895         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7896                 if (v[i] & ~m[i]) {
7897                         DRV_LOG(ERR,
7898                                 "match_value differs from match_criteria"
7899                                 " %p[%u] != %p[%u]",
7900                                 match_value, i, match_mask, i);
7901                         return -EINVAL;
7902                 }
7903         }
7904         return 0;
7905 }
7906 #endif
7907
7908 /**
7909  * Add match of ip_version.
7910  *
7911  * @param[in] group
7912  *   Flow group.
7913  * @param[in] headers_v
7914  *   Values header pointer.
7915  * @param[in] headers_m
7916  *   Masks header pointer.
7917  * @param[in] ip_version
7918  *   The IP version to set.
7919  */
7920 static inline void
7921 flow_dv_set_match_ip_version(uint32_t group,
7922                              void *headers_v,
7923                              void *headers_m,
7924                              uint8_t ip_version)
7925 {
7926         if (group == 0)
7927                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7928         else
7929                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7930                          ip_version);
7931         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7932         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7933         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7934 }
7935
7936 /**
7937  * Add Ethernet item to matcher and to the value.
7938  *
7939  * @param[in, out] matcher
7940  *   Flow matcher.
7941  * @param[in, out] key
7942  *   Flow matcher value.
7943  * @param[in] item
7944  *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
7948 static void
7949 flow_dv_translate_item_eth(void *matcher, void *key,
7950                            const struct rte_flow_item *item, int inner,
7951                            uint32_t group)
7952 {
7953         const struct rte_flow_item_eth *eth_m = item->mask;
7954         const struct rte_flow_item_eth *eth_v = item->spec;
7955         const struct rte_flow_item_eth nic_mask = {
7956                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7957                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7958                 .type = RTE_BE16(0xffff),
7959                 .has_vlan = 0,
7960         };
7961         void *hdrs_m;
7962         void *hdrs_v;
7963         char *l24_v;
7964         unsigned int i;
7965
7966         if (!eth_v)
7967                 return;
7968         if (!eth_m)
7969                 eth_m = &nic_mask;
7970         if (inner) {
7971                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7972                                          inner_headers);
7973                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7974         } else {
7975                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7976                                          outer_headers);
7977                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7978         }
7979         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
7980                &eth_m->dst, sizeof(eth_m->dst));
7981         /* The value must be in the range of the mask. */
7982         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
7983         for (i = 0; i < sizeof(eth_m->dst); ++i)
7984                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
7985         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
7986                &eth_m->src, sizeof(eth_m->src));
7987         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
7988         /* The value must be in the range of the mask. */
7989         for (i = 0; i < sizeof(eth_m->dst); ++i)
7990                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
7991         /*
7992          * HW supports match on one Ethertype, the Ethertype following the last
7993          * VLAN tag of the packet (see PRM).
7994          * Set match on ethertype only if ETH header is not followed by VLAN.
7995          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7996          * ethertype, and use ip_version field instead.
7997          * eCPRI over Ether layer will use type value 0xAEFE.
7998          */
7999         if (eth_m->type == 0xFFFF) {
8000                 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
8001                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8002                 switch (eth_v->type) {
8003                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8004                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8005                         return;
8006                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8007                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8008                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8009                         return;
8010                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8011                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8012                         return;
8013                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8014                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8015                         return;
8016                 default:
8017                         break;
8018                 }
8019         }
8020         if (eth_m->has_vlan) {
8021                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8022                 if (eth_v->has_vlan) {
8023                         /*
8024                          * Here, when also has_more_vlan field in VLAN item is
8025                          * not set, only single-tagged packets will be matched.
8026                          */
8027                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8028                         return;
8029                 }
8030         }
8031         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8032                  rte_be_to_cpu_16(eth_m->type));
8033         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8034         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8035 }
8036
8037 /**
8038  * Add VLAN item to matcher and to the value.
8039  *
8040  * @param[in, out] dev_flow
8041  *   Flow descriptor.
8042  * @param[in, out] matcher
8043  *   Flow matcher.
8044  * @param[in, out] key
8045  *   Flow matcher value.
8046  * @param[in] item
8047  *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
			    void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	void *hdrs_m;
	void *hdrs_v;
	uint16_t tci_m;
	uint16_t tci_v;

	if (inner) {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
		/*
		 * This is workaround, masks are not supported,
		 * and pre-validated.
		 */
		/* Record the outer VID for the VF VLAN workaround path. */
		if (vlan_v)
			dev_flow->handle->vf_vlan.tag =
					rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
	}
	/*
	 * When VLAN item exists in flow, mark packet as tagged,
	 * even if TCI is not specified.
	 */
	/* Skip if svlan_tag was already set (e.g. by the ETH item). */
	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
	}
	if (!vlan_v)
		return;
	if (!vlan_m)
		vlan_m = &rte_flow_item_vlan_mask;
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	/* The value must be within the mask. */
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	/* TCI layout: PCP (bits 15:13) | CFI/DEI (bit 12) | VID (bits 11:0). */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
	/*
	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
	 * ethertype, and use ip_version field instead.
	 */
	if (vlan_m->inner_type == 0xFFFF) {
		switch (vlan_v->inner_type) {
		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
			/* QinQ: this VLAN is the outer (service) tag. */
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
			return;
		default:
			break;
		}
	}
	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
		/* Only one vlan_tag bit can be set. */
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
		return;
	}
	/* Fallback: generic ethertype match (masked value). */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
8133
8134 /**
8135  * Add IPV4 item to matcher and to the value.
8136  *
8137  * @param[in, out] matcher
8138  *   Flow matcher.
8139  * @param[in, out] key
8140  *   Flow matcher value.
8141  * @param[in] item
8142  *   Flow pattern to translate.
8143  * @param[in] inner
8144  *   Item is inner pattern.
8145  * @param[in] group
8146  *   The group to insert the rule.
8147  */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	/* Default mask applied when the item carries a spec but no mask. */
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
			.time_to_live = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IP version is matched even with an empty spec. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	/* Destination address: value must stay within the mask. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	/* Source address. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* TOS byte is split into ECN (low 2 bits) and DSCP (high 6 bits). */
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv4_m->hdr.time_to_live);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
	/* Fragment matching is boolean: any fragment_offset bit set. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv4_m->hdr.fragment_offset));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
}
8216
8217 /**
8218  * Add IPV6 item to matcher and to the value.
8219  *
8220  * @param[in, out] matcher
8221  *   Flow matcher.
8222  * @param[in, out] key
8223  *   Flow matcher value.
8224  * @param[in] item
8225  *   Flow pattern to translate.
8226  * @param[in] inner
8227  *   Item is inner pattern.
8228  * @param[in] group
8229  *   The group to insert the rule.
8230  */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Default mask applied when the item carries a spec but no mask. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IP version is matched even with an empty spec. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	/* Destination address: value bytes must stay within the mask. */
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	/* Source address. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	/* vtc_flow layout: version(4) | traffic class(8) | flow label(20). */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	/* Bits above each field's width are truncated by MLX5_SET. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
	/* Hop limit. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv6_m->hdr.hop_limits);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
	/* Fragment presence comes from the has_frag_ext flag. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv6_m->has_frag_ext));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
}
8325
8326 /**
8327  * Add IPV6 fragment extension item to matcher and to the value.
8328  *
8329  * @param[in, out] matcher
8330  *   Flow matcher.
8331  * @param[in, out] key
8332  *   Flow matcher value.
8333  * @param[in] item
8334  *   Flow pattern to translate.
8335  * @param[in] inner
8336  *   Item is inner pattern.
8337  */
static void
flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
				     const struct rte_flow_item *item,
				     int inner)
{
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
	/* Default mask applied when the item carries a spec but no mask. */
	const struct rte_flow_item_ipv6_frag_ext nic_mask = {
		.hdr = {
			.next_header = 0xff,
			.frag_data = RTE_BE16(0xffff),
		},
	};
	void *headers_m;
	void *headers_v;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IPv6 fragment extension item exists, so packet is IP fragment. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
	if (!ipv6_frag_ext_v)
		return;
	if (!ipv6_frag_ext_m)
		ipv6_frag_ext_m = &nic_mask;
	/* Match next_header of the fragment header (masked value). */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_frag_ext_m->hdr.next_header);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_frag_ext_v->hdr.next_header &
		 ipv6_frag_ext_m->hdr.next_header);
}
8376
8377 /**
8378  * Add TCP item to matcher and to the value.
8379  *
8380  * @param[in, out] matcher
8381  *   Flow matcher.
8382  * @param[in, out] key
8383  *   Flow matcher value.
8384  * @param[in] item
8385  *   Flow pattern to translate.
8386  * @param[in] inner
8387  *   Item is inner pattern.
8388  */
8389 static void
8390 flow_dv_translate_item_tcp(void *matcher, void *key,
8391                            const struct rte_flow_item *item,
8392                            int inner)
8393 {
8394         const struct rte_flow_item_tcp *tcp_m = item->mask;
8395         const struct rte_flow_item_tcp *tcp_v = item->spec;
8396         void *headers_m;
8397         void *headers_v;
8398
8399         if (inner) {
8400                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8401                                          inner_headers);
8402                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8403         } else {
8404                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8405                                          outer_headers);
8406                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8407         }
8408         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8409         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8410         if (!tcp_v)
8411                 return;
8412         if (!tcp_m)
8413                 tcp_m = &rte_flow_item_tcp_mask;
8414         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8415                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8416         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8417                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8418         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8419                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8420         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8421                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8422         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8423                  tcp_m->hdr.tcp_flags);
8424         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8425                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8426 }
8427
8428 /**
8429  * Add UDP item to matcher and to the value.
8430  *
8431  * @param[in, out] matcher
8432  *   Flow matcher.
8433  * @param[in, out] key
8434  *   Flow matcher value.
8435  * @param[in] item
8436  *   Flow pattern to translate.
8437  * @param[in] inner
8438  *   Item is inner pattern.
8439  */
8440 static void
8441 flow_dv_translate_item_udp(void *matcher, void *key,
8442                            const struct rte_flow_item *item,
8443                            int inner)
8444 {
8445         const struct rte_flow_item_udp *udp_m = item->mask;
8446         const struct rte_flow_item_udp *udp_v = item->spec;
8447         void *headers_m;
8448         void *headers_v;
8449
8450         if (inner) {
8451                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8452                                          inner_headers);
8453                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8454         } else {
8455                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8456                                          outer_headers);
8457                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8458         }
8459         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8460         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8461         if (!udp_v)
8462                 return;
8463         if (!udp_m)
8464                 udp_m = &rte_flow_item_udp_mask;
8465         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8466                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8467         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8468                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8469         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8470                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8471         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8472                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8473 }
8474
8475 /**
8476  * Add GRE optional Key item to matcher and to the value.
8477  *
8478  * @param[in, out] matcher
8479  *   Flow matcher.
8480  * @param[in, out] key
8481  *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
8486  */
8487 static void
8488 flow_dv_translate_item_gre_key(void *matcher, void *key,
8489                                    const struct rte_flow_item *item)
8490 {
8491         const rte_be32_t *key_m = item->mask;
8492         const rte_be32_t *key_v = item->spec;
8493         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8494         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8495         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8496
8497         /* GRE K bit must be on and should already be validated */
8498         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8499         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8500         if (!key_v)
8501                 return;
8502         if (!key_m)
8503                 key_m = &gre_key_default_mask;
8504         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8505                  rte_be_to_cpu_32(*key_m) >> 8);
8506         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8507                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8508         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8509                  rte_be_to_cpu_32(*key_m) & 0xFF);
8510         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8511                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8512 }
8513
8514 /**
8515  * Add GRE item to matcher and to the value.
8516  *
8517  * @param[in, out] matcher
8518  *   Flow matcher.
8519  * @param[in, out] key
8520  *   Flow matcher value.
8521  * @param[in] item
8522  *   Flow pattern to translate.
8523  * @param[in] inner
8524  *   Item is inner pattern.
8525  */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Overlay of the GRE c_rsvd0_ver halfword so that the C/K/S flag
	 * bits can be extracted after byte-swapping to CPU order.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* A GRE item always pins the IP protocol, even with no spec. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v)
		return;
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	/* Encapsulated protocol (masked value). */
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
	/* Decode C/K/S presence bits from the swapped halfword. */
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
}
8589
8590 /**
8591  * Add NVGRE item to matcher and to the value.
8592  *
8593  * @param[in, out] matcher
8594  *   Flow matcher.
8595  * @param[in, out] key
8596  *   Flow matcher value.
8597  * @param[in] item
8598  *   Flow pattern to translate.
8599  * @param[in] inner
8600  *   Item is inner pattern.
8601  */
8602 static void
8603 flow_dv_translate_item_nvgre(void *matcher, void *key,
8604                              const struct rte_flow_item *item,
8605                              int inner)
8606 {
8607         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8608         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8609         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8610         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8611         const char *tni_flow_id_m;
8612         const char *tni_flow_id_v;
8613         char *gre_key_m;
8614         char *gre_key_v;
8615         int size;
8616         int i;
8617
8618         /* For NVGRE, GRE header fields must be set with defined values. */
8619         const struct rte_flow_item_gre gre_spec = {
8620                 .c_rsvd0_ver = RTE_BE16(0x2000),
8621                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8622         };
8623         const struct rte_flow_item_gre gre_mask = {
8624                 .c_rsvd0_ver = RTE_BE16(0xB000),
8625                 .protocol = RTE_BE16(UINT16_MAX),
8626         };
8627         const struct rte_flow_item gre_item = {
8628                 .spec = &gre_spec,
8629                 .mask = &gre_mask,
8630                 .last = NULL,
8631         };
8632         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8633         if (!nvgre_v)
8634                 return;
8635         if (!nvgre_m)
8636                 nvgre_m = &rte_flow_item_nvgre_mask;
8637         tni_flow_id_m = (const char *)nvgre_m->tni;
8638         tni_flow_id_v = (const char *)nvgre_v->tni;
8639         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8640         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8641         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8642         memcpy(gre_key_m, tni_flow_id_m, size);
8643         for (i = 0; i < size; ++i)
8644                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8645 }
8646
8647 /**
8648  * Add VXLAN item to matcher and to the value.
8649  *
8650  * @param[in, out] matcher
8651  *   Flow matcher.
8652  * @param[in, out] key
8653  *   Flow matcher value.
8654  * @param[in] item
8655  *   Flow pattern to translate.
8656  * @param[in] inner
8657  *   Item is inner pattern.
8658  */
8659 static void
8660 flow_dv_translate_item_vxlan(void *matcher, void *key,
8661                              const struct rte_flow_item *item,
8662                              int inner)
8663 {
8664         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8665         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8666         void *headers_m;
8667         void *headers_v;
8668         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8669         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8670         char *vni_m;
8671         char *vni_v;
8672         uint16_t dport;
8673         int size;
8674         int i;
8675
8676         if (inner) {
8677                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8678                                          inner_headers);
8679                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8680         } else {
8681                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8682                                          outer_headers);
8683                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8684         }
8685         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8686                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8687         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8688                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8689                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8690         }
8691         if (!vxlan_v)
8692                 return;
8693         if (!vxlan_m)
8694                 vxlan_m = &rte_flow_item_vxlan_mask;
8695         size = sizeof(vxlan_m->vni);
8696         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8697         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8698         memcpy(vni_m, vxlan_m->vni, size);
8699         for (i = 0; i < size; ++i)
8700                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8701 }
8702
/**
 * Add VXLAN-GPE item to matcher and to the value.
 *
 * Sets the UDP destination port (unless already matched), the VNI bytes,
 * the GPE flags byte and the next-protocol field in the misc3 section of
 * the match parameters.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */

static void
flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
				 const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	/* VXLAN-GPE fields live in the misc3 section of the match param. */
	void *misc_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
	void *misc_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	char *vni_m;
	char *vni_v;
	uint16_t dport;
	int size;
	int i;
	/*
	 * Default flags match when the user gives no flags mask: full mask,
	 * value 0xc — presumably the VNI-valid (I) and next-protocol (P)
	 * bits of the GPE header; confirm against the VXLAN-GPE draft.
	 */
	uint8_t flags_m = 0xff;
	uint8_t flags_v = 0xc;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * NOTE(review): item->type is expected to be VXLAN_GPE here, so this
	 * ternary (copied from the VXLAN translator) always resolves to
	 * MLX5_UDP_PORT_VXLAN_GPE — confirm with callers.
	 */
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	/* Force the UDP dport only when the user did not match it already. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!vxlan_v)
		return;
	if (!vxlan_m)
		vxlan_m = &rte_flow_item_vxlan_gpe_mask;
	size = sizeof(vxlan_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
	memcpy(vni_m, vxlan_m->vni, size);
	/* Value bytes are pre-masked as required by the matcher. */
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
	/* User-provided flags mask overrides the defaults above. */
	if (vxlan_m->flags) {
		flags_m = vxlan_m->flags;
		flags_v = vxlan_v->flags;
	}
	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
		 vxlan_m->protocol);
	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
		 vxlan_v->protocol);
}
8772
/**
 * Add Geneve item to matcher and to the value.
 *
 * Sets the UDP destination port (unless already matched), the VNI bytes,
 * the protocol type, and the OAM and option-length fields extracted from
 * the Geneve base header word.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */

static void
flow_dv_translate_item_geneve(void *matcher, void *key,
			      const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_geneve *geneve_m = item->mask;
	const struct rte_flow_item_geneve *geneve_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	uint16_t dport;
	/* Host-order copies of the ver/opt-len/O/C base header word. */
	uint16_t gbhdr_m;
	uint16_t gbhdr_v;
	char *vni_m;
	char *vni_v;
	size_t size, i;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	dport = MLX5_UDP_PORT_GENEVE;
	/* Force the UDP dport only when the user did not match it already. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!geneve_v)
		return;
	if (!geneve_m)
		geneve_m = &rte_flow_item_geneve_mask;
	size = sizeof(geneve_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
	memcpy(vni_m, geneve_m->vni, size);
	/* Value bytes are pre-masked as required by the matcher. */
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & geneve_v->vni[i];
	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
		 rte_be_to_cpu_16(geneve_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
		 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
	/* OAM flag and option length are sub-fields of the base header. */
	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
}
8843
/**
 * Create Geneve TLV option resource.
 *
 * Only a single GENEVE TLV option object is supported per device; this
 * function either takes a reference on the existing one (when class, type
 * and length all match) or creates it. Access to the shared resource is
 * serialized by sh->geneve_tlv_opt_sl.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in] item
 *   Flow item whose spec describes the GENEVE option (class/type/length).
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */

int
flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
					     const struct rte_flow_item *item,
					     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
			sh->geneve_tlv_option_resource;
	struct mlx5_devx_obj *obj;
	const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
	int ret = 0;

	if (!geneve_opt_v)
		return -1;
	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
	if (geneve_opt_resource != NULL) {
		if (geneve_opt_resource->option_class ==
			geneve_opt_v->option_class &&
			geneve_opt_resource->option_type ==
			geneve_opt_v->option_type &&
			geneve_opt_resource->length ==
			geneve_opt_v->option_len) {
			/* We already have GENEVE TLV option obj allocated. */
			__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
					   __ATOMIC_RELAXED);
		} else {
			/*
			 * NOTE(review): ENOMEM is reported here although the
			 * failure is a capability limit, not an allocation
			 * failure — kept for ABI stability.
			 */
			ret = rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Only one GENEVE TLV option supported");
			goto exit;
		}
	} else {
		/* Create a GENEVE TLV object and resource. */
		obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
				geneve_opt_v->option_class,
				geneve_opt_v->option_type,
				geneve_opt_v->option_len);
		if (!obj) {
			ret = rte_flow_error_set(error, ENODATA,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to create GENEVE TLV Devx object");
			goto exit;
		}
		sh->geneve_tlv_option_resource =
				mlx5_malloc(MLX5_MEM_ZERO,
						sizeof(*geneve_opt_resource),
						0, SOCKET_ID_ANY);
		if (!sh->geneve_tlv_option_resource) {
			/* Roll back the Devx object on allocation failure. */
			claim_zero(mlx5_devx_cmd_destroy(obj));
			ret = rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"GENEVE TLV object memory allocation failed");
			goto exit;
		}
		geneve_opt_resource = sh->geneve_tlv_option_resource;
		geneve_opt_resource->obj = obj;
		geneve_opt_resource->option_class = geneve_opt_v->option_class;
		geneve_opt_resource->option_type = geneve_opt_v->option_type;
		geneve_opt_resource->length = geneve_opt_v->option_len;
		__atomic_store_n(&geneve_opt_resource->refcnt, 1,
				__ATOMIC_RELAXED);
	}
exit:
	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
	return ret;
}
8927
8928 /**
8929  * Add Geneve TLV option item to matcher.
8930  *
8931  * @param[in, out] dev
8932  *   Pointer to rte_eth_dev structure.
8933  * @param[in, out] matcher
8934  *   Flow matcher.
8935  * @param[in, out] key
8936  *   Flow matcher value.
8937  * @param[in] item
8938  *   Flow pattern to translate.
8939  * @param[out] error
8940  *   Pointer to error structure.
8941  */
8942 static int
8943 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8944                                   void *key, const struct rte_flow_item *item,
8945                                   struct rte_flow_error *error)
8946 {
8947         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8948         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8949         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8950         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8951         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8952                         misc_parameters_3);
8953         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8954         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8955         int ret = 0;
8956
8957         if (!geneve_opt_v)
8958                 return -1;
8959         if (!geneve_opt_m)
8960                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8961         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8962                                                            error);
8963         if (ret) {
8964                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8965                 return ret;
8966         }
8967         /*
8968          * Set the option length in GENEVE header if not requested.
8969          * The GENEVE TLV option length is expressed by the option length field
8970          * in the GENEVE header.
8971          * If the option length was not requested but the GENEVE TLV option item
8972          * is present we set the option length field implicitly.
8973          */
8974         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
8975                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8976                          MLX5_GENEVE_OPTLEN_MASK);
8977                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8978                          geneve_opt_v->option_len + 1);
8979         }
8980         /* Set the data. */
8981         if (geneve_opt_v->data) {
8982                 memcpy(&opt_data_key, geneve_opt_v->data,
8983                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8984                                 sizeof(opt_data_key)));
8985                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8986                                 sizeof(opt_data_key));
8987                 memcpy(&opt_data_mask, geneve_opt_m->data,
8988                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8989                                 sizeof(opt_data_mask)));
8990                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8991                                 sizeof(opt_data_mask));
8992                 MLX5_SET(fte_match_set_misc3, misc3_m,
8993                                 geneve_tlv_option_0_data,
8994                                 rte_be_to_cpu_32(opt_data_mask));
8995                 MLX5_SET(fte_match_set_misc3, misc3_v,
8996                                 geneve_tlv_option_0_data,
8997                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
8998         }
8999         return ret;
9000 }
9001
/**
 * Add MPLS item to matcher and to the value.
 *
 * First pins the encapsulating protocol (UDP dport, GRE protocol, or IP
 * protocol) according to the previous layer, then matches the MPLS label
 * word in the misc2 section appropriate for that encapsulation.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] prev_layer
 *   The protocol layer indicated in previous item.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_mpls(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    uint64_t prev_layer,
			    int inner)
{
	/* The MPLS item carries a single big-endian 32-bit label word. */
	const uint32_t *in_mpls_m = item->mask;
	const uint32_t *in_mpls_v = item->spec;
	uint32_t *out_mpls_m = 0;
	uint32_t *out_mpls_v = 0;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_2);
	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

	/* Pin the carrier protocol implied by the previous layer. */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 MLX5_UDP_PORT_MPLS);
		break;
	case MLX5_FLOW_LAYER_GRE:
		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
			 RTE_ETHER_TYPE_MPLS);
		break;
	default:
		/* No tunnel layer seen: assume MPLS directly over IP. */
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_MPLS);
		break;
	}
	if (!in_mpls_v)
		return;
	if (!in_mpls_m)
		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
	/* Choose the misc2 MPLS field matching the encapsulation. */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_udp);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_udp);
		break;
	case MLX5_FLOW_LAYER_GRE:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_gre);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_gre);
		break;
	default:
		/* Inner MPLS not over GRE is not supported. */
		if (!inner) {
			out_mpls_m =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_m,
							 outer_first_mpls);
			out_mpls_v =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_v,
							 outer_first_mpls);
		}
		break;
	}
	/* Pointers stay NULL for the unsupported inner case above. */
	if (out_mpls_m && out_mpls_v) {
		*out_mpls_m = *in_mpls_m;
		*out_mpls_v = *in_mpls_v & *in_mpls_m;
	}
}
9091
/**
 * Add metadata register item to matcher
 *
 * Dispatches the (data, mask) pair to the misc2 field that backs the given
 * metadata register. The data is pre-masked on entry, so the value written
 * never exceeds the mask.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] reg_type
 *   Type of device metadata register
 * @param[in] data
 *   Register value
 * @param[in] mask
 *   Register mask
 */
static void
flow_dv_match_meta_reg(void *matcher, void *key,
		       enum modify_reg reg_type,
		       uint32_t data, uint32_t mask)
{
	void *misc2_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
	void *misc2_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	/* Scratch for the REG_C_0 read-modify-write below. */
	uint32_t temp;

	data &= mask;
	switch (reg_type) {
	case REG_A:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
		break;
	case REG_B:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
		break;
	case REG_C_0:
		/*
		 * The metadata register C0 field might be divided into
		 * source vport index and META item value, we should set
		 * this field according to specified mask, not as whole one.
		 */
		temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
		temp |= mask;
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
		temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
		temp &= ~mask;
		temp |= data;
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
		break;
	case REG_C_1:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
		break;
	case REG_C_2:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
		break;
	case REG_C_3:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
		break;
	case REG_C_4:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
		break;
	case REG_C_5:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
		break;
	case REG_C_6:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
		break;
	case REG_C_7:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
		break;
	default:
		/* Callers must pass a valid register; no fallback exists. */
		MLX5_ASSERT(false);
		break;
	}
}
9174
9175 /**
9176  * Add MARK item to matcher
9177  *
9178  * @param[in] dev
9179  *   The device to configure through.
9180  * @param[in, out] matcher
9181  *   Flow matcher.
9182  * @param[in, out] key
9183  *   Flow matcher value.
9184  * @param[in] item
9185  *   Flow pattern to translate.
9186  */
9187 static void
9188 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9189                             void *matcher, void *key,
9190                             const struct rte_flow_item *item)
9191 {
9192         struct mlx5_priv *priv = dev->data->dev_private;
9193         const struct rte_flow_item_mark *mark;
9194         uint32_t value;
9195         uint32_t mask;
9196
9197         mark = item->mask ? (const void *)item->mask :
9198                             &rte_flow_item_mark_mask;
9199         mask = mark->id & priv->sh->dv_mark_mask;
9200         mark = (const void *)item->spec;
9201         MLX5_ASSERT(mark);
9202         value = mark->id & priv->sh->dv_mark_mask & mask;
9203         if (mask) {
9204                 enum modify_reg reg;
9205
9206                 /* Get the metadata register index for the mark. */
9207                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9208                 MLX5_ASSERT(reg > 0);
9209                 if (reg == REG_C_0) {
9210                         struct mlx5_priv *priv = dev->data->dev_private;
9211                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9212                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9213
9214                         mask &= msk_c0;
9215                         mask <<= shl_c0;
9216                         value <<= shl_c0;
9217                 }
9218                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9219         }
9220 }
9221
/**
 * Add META item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_meta(struct rte_eth_dev *dev,
			    void *matcher, void *key,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item *item)
{
	const struct rte_flow_item_meta *meta_m;
	const struct rte_flow_item_meta *meta_v;

	meta_m = (const void *)item->mask;
	if (!meta_m)
		meta_m = &rte_flow_item_meta_mask;
	meta_v = (const void *)item->spec;
	if (meta_v) {
		int reg;
		uint32_t value = meta_v->data;
		uint32_t mask = meta_m->data;

		/* The register depends on flow attributes (transfer etc.). */
		reg = flow_dv_get_metadata_reg(dev, attr, NULL);
		if (reg < 0)
			return;
		MLX5_ASSERT(reg != REG_NON);
		/*
		 * In datapath code there is no endianness
		 * conversions for performance reasons, all
		 * pattern conversions are done in rte_flow.
		 */
		value = rte_cpu_to_be_32(value);
		mask = rte_cpu_to_be_32(mask);
		if (reg == REG_C_0) {
			struct mlx5_priv *priv = dev->data->dev_private;
			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
			uint32_t shl_c0 = rte_bsf32(msk_c0);
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
			/*
			 * On little-endian hosts realign the byte-swapped
			 * value to the width of the configured meta mask
			 * before shifting into the REG_C_0 sub-field.
			 */
			uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);

			value >>= shr_c0;
			mask >>= shr_c0;
#endif
			value <<= shl_c0;
			mask <<= shl_c0;
			MLX5_ASSERT(msk_c0);
			MLX5_ASSERT(!(~msk_c0 & mask));
		}
		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
	}
}
9283
/**
 * Add vport metadata Reg C0 item to matcher
 *
 * Thin wrapper that matches the source-vport metadata carried in REG_C_0.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] value
 *   Vport metadata value to match in REG_C_0.
 * @param[in] mask
 *   Mask selecting the REG_C_0 bits holding the vport metadata.
 */
static void
flow_dv_translate_item_meta_vport(void *matcher, void *key,
				  uint32_t value, uint32_t mask)
{
	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
}
9300
9301 /**
9302  * Add tag item to matcher
9303  *
9304  * @param[in] dev
9305  *   The devich to configure through.
9306  * @param[in, out] matcher
9307  *   Flow matcher.
9308  * @param[in, out] key
9309  *   Flow matcher value.
9310  * @param[in] item
9311  *   Flow pattern to translate.
9312  */
9313 static void
9314 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9315                                 void *matcher, void *key,
9316                                 const struct rte_flow_item *item)
9317 {
9318         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9319         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9320         uint32_t mask, value;
9321
9322         MLX5_ASSERT(tag_v);
9323         value = tag_v->data;
9324         mask = tag_m ? tag_m->data : UINT32_MAX;
9325         if (tag_v->id == REG_C_0) {
9326                 struct mlx5_priv *priv = dev->data->dev_private;
9327                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9328                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9329
9330                 mask &= msk_c0;
9331                 mask <<= shl_c0;
9332                 value <<= shl_c0;
9333         }
9334         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9335 }
9336
9337 /**
9338  * Add TAG item to matcher
9339  *
9340  * @param[in] dev
9341  *   The devich to configure through.
9342  * @param[in, out] matcher
9343  *   Flow matcher.
9344  * @param[in, out] key
9345  *   Flow matcher value.
9346  * @param[in] item
9347  *   Flow pattern to translate.
9348  */
9349 static void
9350 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9351                            void *matcher, void *key,
9352                            const struct rte_flow_item *item)
9353 {
9354         const struct rte_flow_item_tag *tag_v = item->spec;
9355         const struct rte_flow_item_tag *tag_m = item->mask;
9356         enum modify_reg reg;
9357
9358         MLX5_ASSERT(tag_v);
9359         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9360         /* Get the metadata register index for the tag. */
9361         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9362         MLX5_ASSERT(reg > 0);
9363         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9364 }
9365
9366 /**
9367  * Add source vport match to the specified matcher.
9368  *
9369  * @param[in, out] matcher
9370  *   Flow matcher.
9371  * @param[in, out] key
9372  *   Flow matcher value.
9373  * @param[in] port
9374  *   Source vport value to match
9375  * @param[in] mask
9376  *   Mask
9377  */
9378 static void
9379 flow_dv_translate_item_source_vport(void *matcher, void *key,
9380                                     int16_t port, uint16_t mask)
9381 {
9382         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9383         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9384
9385         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9386         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9387 }
9388
/**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate, may be NULL to match the device's own port.
 * @param[in] attr
 *   Flow attributes.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
                               void *key, const struct rte_flow_item *item,
                               const struct rte_flow_attr *attr)
{
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	struct mlx5_priv *priv;
	uint16_t mask, id;

	/* Without an item, fall back to matching this device's own port. */
	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	if (!priv)
		return -rte_errno;
	/*
	 * Translate to vport field or to metadata, depending on mode.
	 * Kernel can use either misc.source_port or half of C0 metadata
	 * register.
	 */
	if (priv->vport_meta_mask) {
		/*
		 * Provide the hint for SW steering library
		 * to insert the flow into ingress domain and
		 * save the extra vport match.
		 */
		if (mask == 0xffff && priv->vport_id == 0xffff &&
		    priv->pf_bond < 0 && attr->transfer)
			flow_dv_translate_item_source_vport
				(matcher, key, priv->vport_id, mask);
		/*
		 * We should always set the vport metadata register,
		 * otherwise the SW steering library can drop
		 * the rule if wire vport metadata value is not zero,
		 * it depends on kernel configuration.
		 */
		flow_dv_translate_item_meta_vport(matcher, key,
						  priv->vport_meta_tag,
						  priv->vport_meta_mask);
	} else {
		/* No metadata support - match on the source vport directly. */
		flow_dv_translate_item_source_vport(matcher, key,
						    priv->vport_id, mask);
	}
	return 0;
}
9451
9452 /**
9453  * Add ICMP6 item to matcher and to the value.
9454  *
9455  * @param[in, out] matcher
9456  *   Flow matcher.
9457  * @param[in, out] key
9458  *   Flow matcher value.
9459  * @param[in] item
9460  *   Flow pattern to translate.
9461  * @param[in] inner
9462  *   Item is inner pattern.
9463  */
9464 static void
9465 flow_dv_translate_item_icmp6(void *matcher, void *key,
9466                               const struct rte_flow_item *item,
9467                               int inner)
9468 {
9469         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9470         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9471         void *headers_m;
9472         void *headers_v;
9473         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9474                                      misc_parameters_3);
9475         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9476         if (inner) {
9477                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9478                                          inner_headers);
9479                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9480         } else {
9481                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9482                                          outer_headers);
9483                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9484         }
9485         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9486         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9487         if (!icmp6_v)
9488                 return;
9489         if (!icmp6_m)
9490                 icmp6_m = &rte_flow_item_icmp6_mask;
9491         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9492         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9493                  icmp6_v->type & icmp6_m->type);
9494         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9495         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9496                  icmp6_v->code & icmp6_m->code);
9497 }
9498
9499 /**
9500  * Add ICMP item to matcher and to the value.
9501  *
9502  * @param[in, out] matcher
9503  *   Flow matcher.
9504  * @param[in, out] key
9505  *   Flow matcher value.
9506  * @param[in] item
9507  *   Flow pattern to translate.
9508  * @param[in] inner
9509  *   Item is inner pattern.
9510  */
9511 static void
9512 flow_dv_translate_item_icmp(void *matcher, void *key,
9513                             const struct rte_flow_item *item,
9514                             int inner)
9515 {
9516         const struct rte_flow_item_icmp *icmp_m = item->mask;
9517         const struct rte_flow_item_icmp *icmp_v = item->spec;
9518         uint32_t icmp_header_data_m = 0;
9519         uint32_t icmp_header_data_v = 0;
9520         void *headers_m;
9521         void *headers_v;
9522         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9523                                      misc_parameters_3);
9524         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9525         if (inner) {
9526                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9527                                          inner_headers);
9528                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9529         } else {
9530                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9531                                          outer_headers);
9532                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9533         }
9534         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9535         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9536         if (!icmp_v)
9537                 return;
9538         if (!icmp_m)
9539                 icmp_m = &rte_flow_item_icmp_mask;
9540         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9541                  icmp_m->hdr.icmp_type);
9542         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9543                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9544         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9545                  icmp_m->hdr.icmp_code);
9546         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9547                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9548         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9549         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9550         if (icmp_header_data_m) {
9551                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9552                 icmp_header_data_v |=
9553                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9554                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9555                          icmp_header_data_m);
9556                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9557                          icmp_header_data_v & icmp_header_data_m);
9558         }
9559 }
9560
9561 /**
9562  * Add GTP item to matcher and to the value.
9563  *
9564  * @param[in, out] matcher
9565  *   Flow matcher.
9566  * @param[in, out] key
9567  *   Flow matcher value.
9568  * @param[in] item
9569  *   Flow pattern to translate.
9570  * @param[in] inner
9571  *   Item is inner pattern.
9572  */
9573 static void
9574 flow_dv_translate_item_gtp(void *matcher, void *key,
9575                            const struct rte_flow_item *item, int inner)
9576 {
9577         const struct rte_flow_item_gtp *gtp_m = item->mask;
9578         const struct rte_flow_item_gtp *gtp_v = item->spec;
9579         void *headers_m;
9580         void *headers_v;
9581         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9582                                      misc_parameters_3);
9583         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9584         uint16_t dport = RTE_GTPU_UDP_PORT;
9585
9586         if (inner) {
9587                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9588                                          inner_headers);
9589                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9590         } else {
9591                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9592                                          outer_headers);
9593                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9594         }
9595         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9596                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9597                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9598         }
9599         if (!gtp_v)
9600                 return;
9601         if (!gtp_m)
9602                 gtp_m = &rte_flow_item_gtp_mask;
9603         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9604                  gtp_m->v_pt_rsv_flags);
9605         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9606                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9607         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9608         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9609                  gtp_v->msg_type & gtp_m->msg_type);
9610         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9611                  rte_be_to_cpu_32(gtp_m->teid));
9612         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9613                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9614 }
9615
/**
 * Add GTP PSC item to matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 *
 * @return
 *   Always 0.
 */
static int
flow_dv_translate_item_gtp_psc(void *matcher, void *key,
                               const struct rte_flow_item *item)
{
	const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
			misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	/* Overlay for the GTP-U optional dword: sequence number, N-PDU
	 * number and next-extension-header type, converted to big endian
	 * before being written to the match parameter below.
	 */
	union {
		uint32_t w32;
		struct {
			uint16_t seq_num;
			uint8_t npdu_num;
			uint8_t next_ext_header_type;
		};
	} dw_2;
	uint8_t gtp_flags;

	/* Always set E-flag match on one, regardless of GTP item settings. */
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
	/* Set next extension header type: full mask on the type byte only. */
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	dw_2.next_ext_header_type = 0xff;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	/* 0x85 is presumably the PDU session container next-extension-header
	 * type - confirm against the GTP-U specification (3GPP TS 29.281).
	 */
	dw_2.next_ext_header_type = 0x85;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	if (gtp_psc_v) {
		/* Overlay for the first extension header dword. */
		union {
			uint32_t w32;
			struct {
				uint8_t len;
				uint8_t type_flags;
				uint8_t qfi;
				uint8_t reserved;
			};
		} dw_0;

		/* Set extension header PDU type and QoS. */
		if (!gtp_psc_m)
			gtp_psc_m = &rte_flow_item_gtp_psc_mask;
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
		dw_0.qfi = gtp_psc_m->qfi;
		MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
							gtp_psc_m->pdu_type);
		dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
		MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
	}
	return 0;
}
9691
/**
 * Add eCPRI item to matcher and to the value.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate. The flex parser sample IDs used for the
 *   match are taken from the device's shared context.
 */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
                             void *key, const struct rte_flow_item *item)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
	struct rte_ecpri_common_hdr common;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	uint32_t *samples;
	void *dw_m;
	void *dw_v;

	if (!ecpri_v)
		return;
	if (!ecpri_m)
		ecpri_m = &rte_flow_item_ecpri_mask;
	/*
	 * Maximal four DW samples are supported in a single matching now.
	 * Two are used now for an eCPRI matching:
	 * 1. Type: one byte, mask should be 0x00ff0000 in network order
	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
	 *    if any.
	 */
	if (!ecpri_m->hdr.common.u32)
		return;
	/* Sample IDs configured for the eCPRI flex parser profile. */
	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
	/* Need to take the whole DW as the mask to fill the entry. */
	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
			    prog_sample_field_value_0);
	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
			    prog_sample_field_value_0);
	/* Already big endian (network order) in the header. */
	*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
	*(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
	/* Sample#0, used for matching type, offset 0. */
	MLX5_SET(fte_match_set_misc4, misc4_m,
		 prog_sample_field_id_0, samples[0]);
	/* It makes no sense to set the sample ID in the mask field. */
	MLX5_SET(fte_match_set_misc4, misc4_v,
		 prog_sample_field_id_0, samples[0]);
	/*
	 * Checking if message body part needs to be matched.
	 * Some wildcard rules only matching type field should be supported.
	 */
	if (ecpri_m->hdr.dummy[0]) {
		common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
		switch (common.type) {
		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
			dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
					    prog_sample_field_value_1);
			dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
					    prog_sample_field_value_1);
			*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
			*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
					    ecpri_m->hdr.dummy[0];
			/* Sample#1, to match message body, offset 4. */
			MLX5_SET(fte_match_set_misc4, misc4_m,
				 prog_sample_field_id_1, samples[1]);
			MLX5_SET(fte_match_set_misc4, misc4_v,
				 prog_sample_field_id_1, samples[1]);
			break;
		default:
			/* Others, do not match any sample ID. */
			break;
		}
	}
}
9778
9779 /*
9780  * Add connection tracking status item to matcher
9781  *
9782  * @param[in] dev
9783  *   The devich to configure through.
9784  * @param[in, out] matcher
9785  *   Flow matcher.
9786  * @param[in, out] key
9787  *   Flow matcher value.
9788  * @param[in] item
9789  *   Flow pattern to translate.
9790  */
9791 static void
9792 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
9793                               void *matcher, void *key,
9794                               const struct rte_flow_item *item)
9795 {
9796         uint32_t reg_value = 0;
9797         int reg_id;
9798         /* 8LSB 0b 11/0000/11, middle 4 bits are reserved. */
9799         uint32_t reg_mask = 0;
9800         const struct rte_flow_item_conntrack *spec = item->spec;
9801         const struct rte_flow_item_conntrack *mask = item->mask;
9802         uint32_t flags;
9803         struct rte_flow_error error;
9804
9805         if (!mask)
9806                 mask = &rte_flow_item_conntrack_mask;
9807         if (!spec || !mask->flags)
9808                 return;
9809         flags = spec->flags & mask->flags;
9810         /* The conflict should be checked in the validation. */
9811         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
9812                 reg_value |= MLX5_CT_SYNDROME_VALID;
9813         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9814                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
9815         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
9816                 reg_value |= MLX5_CT_SYNDROME_INVALID;
9817         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
9818                 reg_value |= MLX5_CT_SYNDROME_TRAP;
9819         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9820                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
9821         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
9822                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
9823                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
9824                 reg_mask |= 0xc0;
9825         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9826                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
9827         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9828                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
9829         /* The REG_C_x value could be saved during startup. */
9830         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
9831         if (reg_id == REG_NON)
9832                 return;
9833         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
9834                                reg_value, reg_mask);
9835 }
9836
/* All-zero reference buffer used to detect empty match criteria sections. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Evaluate to nonzero when the given header section is entirely zero. */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9842
9843 /**
9844  * Calculate flow matcher enable bitmap.
9845  *
9846  * @param match_criteria
9847  *   Pointer to flow matcher criteria.
9848  *
9849  * @return
9850  *   Bitmap of enabled fields.
9851  */
9852 static uint8_t
9853 flow_dv_matcher_enable(uint32_t *match_criteria)
9854 {
9855         uint8_t match_criteria_enable;
9856
9857         match_criteria_enable =
9858                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9859                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9860         match_criteria_enable |=
9861                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9862                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9863         match_criteria_enable |=
9864                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9865                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9866         match_criteria_enable |=
9867                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9868                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9869         match_criteria_enable |=
9870                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9871                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9872         match_criteria_enable |=
9873                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9874                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9875         return match_criteria_enable;
9876 }
9877
/**
 * Hash list callback: allocate and initialize a flow table entry.
 *
 * @param[in] list
 *   Hash list, its ctx is the shared device context.
 * @param[in] key64
 *   Packed union mlx5_flow_tbl_key identifying the table.
 * @param[in] cb_ctx
 *   Pointer to struct mlx5_flow_cb_ctx carrying the device, the error
 *   structure and the tunnel parameters (struct mlx5_flow_tbl_tunnel_prm).
 *
 * @return
 *   Pointer to the created hash list entry on success, NULL otherwise
 *   with the rte_flow error set.
 */
struct mlx5_hlist_entry *
flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
	struct rte_flow_error *error = ctx->error;
	union mlx5_flow_tbl_key key = { .v64 = key64 };
	struct mlx5_flow_tbl_resource *tbl;
	void *domain;
	uint32_t idx = 0;
	int ret;

	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	/* Copy the key fields into the entry for later matching. */
	tbl_data->idx = idx;
	tbl_data->tunnel = tt_prm->tunnel;
	tbl_data->group_id = tt_prm->group_id;
	tbl_data->external = !!tt_prm->external;
	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
	tbl_data->is_egress = !!key.is_egress;
	tbl_data->is_transfer = !!key.is_fdb;
	tbl_data->dummy = !!key.dummy;
	tbl_data->level = key.level;
	tbl_data->id = key.id;
	tbl = &tbl_data->tbl;
	/* A dummy entry needs no HW table object at all. */
	if (key.dummy)
		return &tbl_data->entry;
	/* Pick the steering domain matching the key direction/mode. */
	if (key.is_fdb)
		domain = sh->fdb_domain;
	else if (key.is_egress)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	/* Non-root tables also get a jump action pointing at them. */
	if (key.level != 0) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(tbl->obj, &tbl_data->jump.action);
		if (ret) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot create flow jump action");
			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
			return NULL;
		}
	}
	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
	      key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
	      key.level, key.id);
	mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
			     flow_dv_matcher_create_cb,
			     flow_dv_matcher_match_cb,
			     flow_dv_matcher_remove_cb);
	return &tbl_data->entry;
}
9950
9951 int
9952 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9953                      struct mlx5_hlist_entry *entry, uint64_t key64,
9954                      void *cb_ctx __rte_unused)
9955 {
9956         struct mlx5_flow_tbl_data_entry *tbl_data =
9957                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9958         union mlx5_flow_tbl_key key = { .v64 = key64 };
9959
9960         return tbl_data->level != key.level ||
9961                tbl_data->id != key.id ||
9962                tbl_data->dummy != key.dummy ||
9963                tbl_data->is_transfer != !!key.is_fdb ||
9964                tbl_data->is_egress != !!key.is_egress;
9965 }
9966
9967 /**
9968  * Get a flow table.
9969  *
9970  * @param[in, out] dev
9971  *   Pointer to rte_eth_dev structure.
9972  * @param[in] table_level
9973  *   Table level to use.
9974  * @param[in] egress
9975  *   Direction of the table.
9976  * @param[in] transfer
9977  *   E-Switch or NIC flow.
9978  * @param[in] dummy
9979  *   Dummy entry for dv API.
9980  * @param[in] table_id
9981  *   Table id to use.
9982  * @param[out] error
9983  *   pointer to error structure.
9984  *
9985  * @return
9986  *   Returns tables resource based on the index, NULL in case of failed.
9987  */
9988 struct mlx5_flow_tbl_resource *
9989 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
9990                          uint32_t table_level, uint8_t egress,
9991                          uint8_t transfer,
9992                          bool external,
9993                          const struct mlx5_flow_tunnel *tunnel,
9994                          uint32_t group_id, uint8_t dummy,
9995                          uint32_t table_id,
9996                          struct rte_flow_error *error)
9997 {
9998         struct mlx5_priv *priv = dev->data->dev_private;
9999         union mlx5_flow_tbl_key table_key = {
10000                 {
10001                         .level = table_level,
10002                         .id = table_id,
10003                         .reserved = 0,
10004                         .dummy = !!dummy,
10005                         .is_fdb = !!transfer,
10006                         .is_egress = !!egress,
10007                 }
10008         };
10009         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10010                 .tunnel = tunnel,
10011                 .group_id = group_id,
10012                 .external = external,
10013         };
10014         struct mlx5_flow_cb_ctx ctx = {
10015                 .dev = dev,
10016                 .error = error,
10017                 .data = &tt_prm,
10018         };
10019         struct mlx5_hlist_entry *entry;
10020         struct mlx5_flow_tbl_data_entry *tbl_data;
10021
10022         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10023         if (!entry) {
10024                 rte_flow_error_set(error, ENOMEM,
10025                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10026                                    "cannot get table");
10027                 return NULL;
10028         }
10029         DRV_LOG(DEBUG, "table_level %u table_id %u "
10030                 "tunnel %u group %u registered.",
10031                 table_level, table_id,
10032                 tunnel ? tunnel->tunnel_id : 0, group_id);
10033         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10034         return &tbl_data->tbl;
10035 }
10036
/**
 * Hash list remove callback for flow tables.
 *
 * Destroys the table's DR objects and, for tunnel-offload tables created
 * by an application, drops the reference held on the tunnel group hash
 * entry, then frees the table data entry back to its pool.
 *
 * @param[in] list
 *   Flow table hash list; its ctx is the shared device context.
 * @param[in] entry
 *   Hash list entry embedded in the table data entry being removed.
 */
void
flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
		      struct mlx5_hlist_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	MLX5_ASSERT(entry && sh);
	/* Destroy the jump action before the table object it refers to. */
	if (tbl_data->jump.action)
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
	if (tbl_data->tbl.obj)
		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
	/*
	 * Tunnel offload: release the group entry this table registered
	 * in the tunnel (or hub) group hash list.
	 */
	if (tbl_data->tunnel_offload && tbl_data->external) {
		struct mlx5_hlist_entry *he;
		struct mlx5_hlist *tunnel_grp_hash;
		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
		union tunnel_tbl_key tunnel_key = {
			.tunnel_id = tbl_data->tunnel ?
					tbl_data->tunnel->tunnel_id : 0,
			.group = tbl_data->group_id
		};
		uint32_t table_level = tbl_data->level;

		/* Non-tunnel tables register under the hub's group list. */
		tunnel_grp_hash = tbl_data->tunnel ?
					tbl_data->tunnel->groups :
					thub->groups;
		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
		if (he)
			mlx5_hlist_unregister(tunnel_grp_hash, he);
		DRV_LOG(DEBUG,
			"table_level %u id %u tunnel %u group %u released.",
			table_level,
			tbl_data->id,
			tbl_data->tunnel ?
			tbl_data->tunnel->tunnel_id : 0,
			tbl_data->group_id);
	}
	/* Matchers of this table must have been released by now. */
	mlx5_cache_list_destroy(&tbl_data->matchers);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
10078
10079 /**
10080  * Release a flow table.
10081  *
10082  * @param[in] sh
10083  *   Pointer to device shared structure.
10084  * @param[in] tbl
10085  *   Table resource to be released.
10086  *
10087  * @return
10088  *   Returns 0 if table was released, else return 1;
10089  */
10090 static int
10091 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10092                              struct mlx5_flow_tbl_resource *tbl)
10093 {
10094         struct mlx5_flow_tbl_data_entry *tbl_data =
10095                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10096
10097         if (!tbl)
10098                 return 0;
10099         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10100 }
10101
10102 int
10103 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
10104                          struct mlx5_cache_entry *entry, void *cb_ctx)
10105 {
10106         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10107         struct mlx5_flow_dv_matcher *ref = ctx->data;
10108         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10109                                                         entry);
10110
10111         return cur->crc != ref->crc ||
10112                cur->priority != ref->priority ||
10113                memcmp((const void *)cur->mask.buf,
10114                       (const void *)ref->mask.buf, ref->mask.size);
10115 }
10116
10117 struct mlx5_cache_entry *
10118 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
10119                           struct mlx5_cache_entry *entry __rte_unused,
10120                           void *cb_ctx)
10121 {
10122         struct mlx5_dev_ctx_shared *sh = list->ctx;
10123         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10124         struct mlx5_flow_dv_matcher *ref = ctx->data;
10125         struct mlx5_flow_dv_matcher *cache;
10126         struct mlx5dv_flow_matcher_attr dv_attr = {
10127                 .type = IBV_FLOW_ATTR_NORMAL,
10128                 .match_mask = (void *)&ref->mask,
10129         };
10130         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10131                                                             typeof(*tbl), tbl);
10132         int ret;
10133
10134         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
10135         if (!cache) {
10136                 rte_flow_error_set(ctx->error, ENOMEM,
10137                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10138                                    "cannot create matcher");
10139                 return NULL;
10140         }
10141         *cache = *ref;
10142         dv_attr.match_criteria_enable =
10143                 flow_dv_matcher_enable(cache->mask.buf);
10144         dv_attr.priority = ref->priority;
10145         if (tbl->is_egress)
10146                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10147         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
10148                                                &cache->matcher_object);
10149         if (ret) {
10150                 mlx5_free(cache);
10151                 rte_flow_error_set(ctx->error, ENOMEM,
10152                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10153                                    "cannot create matcher");
10154                 return NULL;
10155         }
10156         return &cache->entry;
10157 }
10158
10159 /**
10160  * Register the flow matcher.
10161  *
10162  * @param[in, out] dev
10163  *   Pointer to rte_eth_dev structure.
10164  * @param[in, out] matcher
10165  *   Pointer to flow matcher.
10166  * @param[in, out] key
10167  *   Pointer to flow table key.
10168  * @parm[in, out] dev_flow
10169  *   Pointer to the dev_flow.
10170  * @param[out] error
10171  *   pointer to error structure.
10172  *
10173  * @return
10174  *   0 on success otherwise -errno and errno is set.
10175  */
10176 static int
10177 flow_dv_matcher_register(struct rte_eth_dev *dev,
10178                          struct mlx5_flow_dv_matcher *ref,
10179                          union mlx5_flow_tbl_key *key,
10180                          struct mlx5_flow *dev_flow,
10181                          const struct mlx5_flow_tunnel *tunnel,
10182                          uint32_t group_id,
10183                          struct rte_flow_error *error)
10184 {
10185         struct mlx5_cache_entry *entry;
10186         struct mlx5_flow_dv_matcher *cache;
10187         struct mlx5_flow_tbl_resource *tbl;
10188         struct mlx5_flow_tbl_data_entry *tbl_data;
10189         struct mlx5_flow_cb_ctx ctx = {
10190                 .error = error,
10191                 .data = ref,
10192         };
10193
10194         /**
10195          * tunnel offload API requires this registration for cases when
10196          * tunnel match rule was inserted before tunnel set rule.
10197          */
10198         tbl = flow_dv_tbl_resource_get(dev, key->level,
10199                                        key->is_egress, key->is_fdb,
10200                                        dev_flow->external, tunnel,
10201                                        group_id, 0, key->id, error);
10202         if (!tbl)
10203                 return -rte_errno;      /* No need to refill the error info */
10204         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10205         ref->tbl = tbl;
10206         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
10207         if (!entry) {
10208                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10209                 return rte_flow_error_set(error, ENOMEM,
10210                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10211                                           "cannot allocate ref memory");
10212         }
10213         cache = container_of(entry, typeof(*cache), entry);
10214         dev_flow->handle->dvh.matcher = cache;
10215         return 0;
10216 }
10217
10218 struct mlx5_hlist_entry *
10219 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
10220 {
10221         struct mlx5_dev_ctx_shared *sh = list->ctx;
10222         struct rte_flow_error *error = ctx;
10223         struct mlx5_flow_dv_tag_resource *entry;
10224         uint32_t idx = 0;
10225         int ret;
10226
10227         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10228         if (!entry) {
10229                 rte_flow_error_set(error, ENOMEM,
10230                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10231                                    "cannot allocate resource memory");
10232                 return NULL;
10233         }
10234         entry->idx = idx;
10235         entry->tag_id = key;
10236         ret = mlx5_flow_os_create_flow_action_tag(key,
10237                                                   &entry->action);
10238         if (ret) {
10239                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10240                 rte_flow_error_set(error, ENOMEM,
10241                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10242                                    NULL, "cannot create action");
10243                 return NULL;
10244         }
10245         return &entry->entry;
10246 }
10247
10248 int
10249 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
10250                      struct mlx5_hlist_entry *entry, uint64_t key,
10251                      void *cb_ctx __rte_unused)
10252 {
10253         struct mlx5_flow_dv_tag_resource *tag =
10254                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10255
10256         return key != tag->tag_id;
10257 }
10258
10259 /**
10260  * Find existing tag resource or create and register a new one.
10261  *
10262  * @param dev[in, out]
10263  *   Pointer to rte_eth_dev structure.
10264  * @param[in, out] tag_be24
10265  *   Tag value in big endian then R-shift 8.
10266  * @parm[in, out] dev_flow
10267  *   Pointer to the dev_flow.
10268  * @param[out] error
10269  *   pointer to error structure.
10270  *
10271  * @return
10272  *   0 on success otherwise -errno and errno is set.
10273  */
10274 static int
10275 flow_dv_tag_resource_register
10276                         (struct rte_eth_dev *dev,
10277                          uint32_t tag_be24,
10278                          struct mlx5_flow *dev_flow,
10279                          struct rte_flow_error *error)
10280 {
10281         struct mlx5_priv *priv = dev->data->dev_private;
10282         struct mlx5_flow_dv_tag_resource *cache_resource;
10283         struct mlx5_hlist_entry *entry;
10284
10285         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
10286         if (entry) {
10287                 cache_resource = container_of
10288                         (entry, struct mlx5_flow_dv_tag_resource, entry);
10289                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
10290                 dev_flow->dv.tag_resource = cache_resource;
10291                 return 0;
10292         }
10293         return -rte_errno;
10294 }
10295
10296 void
10297 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
10298                       struct mlx5_hlist_entry *entry)
10299 {
10300         struct mlx5_dev_ctx_shared *sh = list->ctx;
10301         struct mlx5_flow_dv_tag_resource *tag =
10302                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10303
10304         MLX5_ASSERT(tag && sh && tag->action);
10305         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10306         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10307         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10308 }
10309
10310 /**
10311  * Release the tag.
10312  *
10313  * @param dev
10314  *   Pointer to Ethernet device.
10315  * @param tag_idx
10316  *   Tag index.
10317  *
10318  * @return
10319  *   1 while a reference on it exists, 0 when freed.
10320  */
10321 static int
10322 flow_dv_tag_release(struct rte_eth_dev *dev,
10323                     uint32_t tag_idx)
10324 {
10325         struct mlx5_priv *priv = dev->data->dev_private;
10326         struct mlx5_flow_dv_tag_resource *tag;
10327
10328         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10329         if (!tag)
10330                 return 0;
10331         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10332                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10333         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10334 }
10335
10336 /**
10337  * Translate port ID action to vport.
10338  *
10339  * @param[in] dev
10340  *   Pointer to rte_eth_dev structure.
10341  * @param[in] action
10342  *   Pointer to the port ID action.
10343  * @param[out] dst_port_id
10344  *   The target port ID.
10345  * @param[out] error
10346  *   Pointer to the error structure.
10347  *
10348  * @return
10349  *   0 on success, a negative errno value otherwise and rte_errno is set.
10350  */
10351 static int
10352 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10353                                  const struct rte_flow_action *action,
10354                                  uint32_t *dst_port_id,
10355                                  struct rte_flow_error *error)
10356 {
10357         uint32_t port;
10358         struct mlx5_priv *priv;
10359         const struct rte_flow_action_port_id *conf =
10360                         (const struct rte_flow_action_port_id *)action->conf;
10361
10362         port = conf->original ? dev->data->port_id : conf->id;
10363         priv = mlx5_port_to_eswitch_info(port, false);
10364         if (!priv)
10365                 return rte_flow_error_set(error, -rte_errno,
10366                                           RTE_FLOW_ERROR_TYPE_ACTION,
10367                                           NULL,
10368                                           "No eswitch info was found for port");
10369 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
10370         /*
10371          * This parameter is transferred to
10372          * mlx5dv_dr_action_create_dest_ib_port().
10373          */
10374         *dst_port_id = priv->dev_port;
10375 #else
10376         /*
10377          * Legacy mode, no LAG configurations is supported.
10378          * This parameter is transferred to
10379          * mlx5dv_dr_action_create_dest_vport().
10380          */
10381         *dst_port_id = priv->vport_id;
10382 #endif
10383         return 0;
10384 }
10385
10386 /**
10387  * Create a counter with aging configuration.
10388  *
10389  * @param[in] dev
10390  *   Pointer to rte_eth_dev structure.
10391  * @param[in] dev_flow
10392  *   Pointer to the mlx5_flow.
10393  * @param[out] count
10394  *   Pointer to the counter action configuration.
10395  * @param[in] age
10396  *   Pointer to the aging action configuration.
10397  *
10398  * @return
10399  *   Index to flow counter on success, 0 otherwise.
10400  */
10401 static uint32_t
10402 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10403                                 struct mlx5_flow *dev_flow,
10404                                 const struct rte_flow_action_count *count,
10405                                 const struct rte_flow_action_age *age)
10406 {
10407         uint32_t counter;
10408         struct mlx5_age_param *age_param;
10409
10410         if (count && count->shared)
10411                 counter = flow_dv_counter_get_shared(dev, count->id);
10412         else
10413                 counter = flow_dv_counter_alloc(dev, !!age);
10414         if (!counter || age == NULL)
10415                 return counter;
10416         age_param = flow_dv_counter_idx_get_age(dev, counter);
10417         age_param->context = age->context ? age->context :
10418                 (void *)(uintptr_t)(dev_flow->flow_idx);
10419         age_param->timeout = age->timeout;
10420         age_param->port_id = dev->data->port_id;
10421         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10422         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10423         return counter;
10424 }
10425
10426 /**
10427  * Add Tx queue matcher
10428  *
10429  * @param[in] dev
10430  *   Pointer to the dev struct.
10431  * @param[in, out] matcher
10432  *   Flow matcher.
10433  * @param[in, out] key
10434  *   Flow matcher value.
10435  * @param[in] item
10436  *   Flow pattern to translate.
10437  * @param[in] inner
10438  *   Item is inner pattern.
10439  */
10440 static void
10441 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10442                                 void *matcher, void *key,
10443                                 const struct rte_flow_item *item)
10444 {
10445         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10446         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10447         void *misc_m =
10448                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10449         void *misc_v =
10450                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10451         struct mlx5_txq_ctrl *txq;
10452         uint32_t queue;
10453
10454
10455         queue_m = (const void *)item->mask;
10456         if (!queue_m)
10457                 return;
10458         queue_v = (const void *)item->spec;
10459         if (!queue_v)
10460                 return;
10461         txq = mlx5_txq_get(dev, queue_v->queue);
10462         if (!txq)
10463                 return;
10464         queue = txq->obj->sq->id;
10465         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10466         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10467                  queue & queue_m->queue);
10468         mlx5_txq_release(dev, queue_v->queue);
10469 }
10470
10471 /**
10472  * Set the hash fields according to the @p flow information.
10473  *
10474  * @param[in] dev_flow
10475  *   Pointer to the mlx5_flow.
10476  * @param[in] rss_desc
10477  *   Pointer to the mlx5_flow_rss_desc.
10478  */
10479 static void
10480 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10481                        struct mlx5_flow_rss_desc *rss_desc)
10482 {
10483         uint64_t items = dev_flow->handle->layers;
10484         int rss_inner = 0;
10485         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10486
10487         dev_flow->hash_fields = 0;
10488 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10489         if (rss_desc->level >= 2) {
10490                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10491                 rss_inner = 1;
10492         }
10493 #endif
10494         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10495             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10496                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10497                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10498                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10499                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10500                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10501                         else
10502                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10503                 }
10504         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10505                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10506                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10507                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10508                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10509                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10510                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10511                         else
10512                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10513                 }
10514         }
10515         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10516             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10517                 if (rss_types & ETH_RSS_UDP) {
10518                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10519                                 dev_flow->hash_fields |=
10520                                                 IBV_RX_HASH_SRC_PORT_UDP;
10521                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10522                                 dev_flow->hash_fields |=
10523                                                 IBV_RX_HASH_DST_PORT_UDP;
10524                         else
10525                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10526                 }
10527         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10528                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10529                 if (rss_types & ETH_RSS_TCP) {
10530                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10531                                 dev_flow->hash_fields |=
10532                                                 IBV_RX_HASH_SRC_PORT_TCP;
10533                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10534                                 dev_flow->hash_fields |=
10535                                                 IBV_RX_HASH_DST_PORT_TCP;
10536                         else
10537                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10538                 }
10539         }
10540 }
10541
10542 /**
10543  * Prepare an Rx Hash queue.
10544  *
10545  * @param dev
10546  *   Pointer to Ethernet device.
10547  * @param[in] dev_flow
10548  *   Pointer to the mlx5_flow.
10549  * @param[in] rss_desc
10550  *   Pointer to the mlx5_flow_rss_desc.
10551  * @param[out] hrxq_idx
10552  *   Hash Rx queue index.
10553  *
10554  * @return
10555  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10556  */
10557 static struct mlx5_hrxq *
10558 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10559                      struct mlx5_flow *dev_flow,
10560                      struct mlx5_flow_rss_desc *rss_desc,
10561                      uint32_t *hrxq_idx)
10562 {
10563         struct mlx5_priv *priv = dev->data->dev_private;
10564         struct mlx5_flow_handle *dh = dev_flow->handle;
10565         struct mlx5_hrxq *hrxq;
10566
10567         MLX5_ASSERT(rss_desc->queue_num);
10568         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10569         rss_desc->hash_fields = dev_flow->hash_fields;
10570         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10571         rss_desc->shared_rss = 0;
10572         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10573         if (!*hrxq_idx)
10574                 return NULL;
10575         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10576                               *hrxq_idx);
10577         return hrxq;
10578 }
10579
10580 /**
10581  * Release sample sub action resource.
10582  *
10583  * @param[in, out] dev
10584  *   Pointer to rte_eth_dev structure.
10585  * @param[in] act_res
10586  *   Pointer to sample sub action resource.
10587  */
10588 static void
10589 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10590                                    struct mlx5_flow_sub_actions_idx *act_res)
10591 {
10592         if (act_res->rix_hrxq) {
10593                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10594                 act_res->rix_hrxq = 0;
10595         }
10596         if (act_res->rix_encap_decap) {
10597                 flow_dv_encap_decap_resource_release(dev,
10598                                                      act_res->rix_encap_decap);
10599                 act_res->rix_encap_decap = 0;
10600         }
10601         if (act_res->rix_port_id_action) {
10602                 flow_dv_port_id_action_resource_release(dev,
10603                                                 act_res->rix_port_id_action);
10604                 act_res->rix_port_id_action = 0;
10605         }
10606         if (act_res->rix_tag) {
10607                 flow_dv_tag_release(dev, act_res->rix_tag);
10608                 act_res->rix_tag = 0;
10609         }
10610         if (act_res->rix_jump) {
10611                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10612                 act_res->rix_jump = 0;
10613         }
10614 }
10615
10616 int
10617 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
10618                         struct mlx5_cache_entry *entry, void *cb_ctx)
10619 {
10620         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10621         struct rte_eth_dev *dev = ctx->dev;
10622         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10623         struct mlx5_flow_dv_sample_resource *cache_resource =
10624                         container_of(entry, typeof(*cache_resource), entry);
10625
10626         if (resource->ratio == cache_resource->ratio &&
10627             resource->ft_type == cache_resource->ft_type &&
10628             resource->ft_id == cache_resource->ft_id &&
10629             resource->set_action == cache_resource->set_action &&
10630             !memcmp((void *)&resource->sample_act,
10631                     (void *)&cache_resource->sample_act,
10632                     sizeof(struct mlx5_flow_sub_actions_list))) {
10633                 /*
10634                  * Existing sample action should release the prepared
10635                  * sub-actions reference counter.
10636                  */
10637                 flow_dv_sample_sub_actions_release(dev,
10638                                                 &resource->sample_idx);
10639                 return 0;
10640         }
10641         return 1;
10642 }
10643
/**
 * Cache list create callback for sample actions.
 *
 * Allocates a sample resource from the pool, creates the "normal path"
 * table the sampler continues to after sampling (at ft_id + 1), and
 * creates the DR sampler action itself.
 *
 * @param[in] cb_ctx
 *   Callback context carrying the device, the error structure and the
 *   reference sample resource to instantiate.
 *
 * @return
 *   Pointer to the new cache entry, NULL on failure (error is set).
 */
struct mlx5_cache_entry *
flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
	void **sample_dv_actions = resource->sub_actions;
	struct mlx5_flow_dv_sample_resource *cache_resource;
	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	uint32_t idx = 0;
	/* The normal path table sits one level below the sampled one. */
	const uint32_t next_ft_step = 1;
	uint32_t next_ft_id = resource->ft_id + next_ft_step;
	uint8_t is_egress = 0;
	uint8_t is_transfer = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new sample resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*cache_resource = *resource;
	/* Create normal path table level */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		is_transfer = 1;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		is_egress = 1;
	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
					is_egress, is_transfer,
					true, NULL, 0, 0, 0, error);
	if (!tbl) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "fail to create normal path table "
					  "for sample");
		goto error;
	}
	cache_resource->normal_path_tbl = tbl;
	/* FDB: append the shared default miss action as last sample action. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		if (!sh->default_miss_action) {
			rte_flow_error_set(error, ENOMEM,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"default miss action was not "
						"created");
			goto error;
		}
		sample_dv_actions[resource->sample_act.actions_num++] =
						sh->default_miss_action;
	}
	/* Create a DR sample action */
	sampler_attr.sample_ratio = cache_resource->ratio;
	sampler_attr.default_next_table = tbl->obj;
	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
							&sample_dv_actions[0];
	sampler_attr.action = cache_resource->set_action;
	if (mlx5_os_flow_dr_create_flow_action_sampler
			(&sampler_attr, &cache_resource->verbs_action)) {
		rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create sample action");
		goto error;
	}
	cache_resource->idx = idx;
	cache_resource->dev = dev;
	return &cache_resource->entry;
error:
	/* Sub-action references are released here only for non-FDB tables. */
	if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
		flow_dv_sample_sub_actions_release(dev,
						   &cache_resource->sample_idx);
	if (cache_resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
				cache_resource->normal_path_tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
	return NULL;

}
10732
10733 /**
10734  * Find existing sample resource or create and register a new one.
10735  *
10736  * @param[in, out] dev
10737  *   Pointer to rte_eth_dev structure.
10738  * @param[in] resource
10739  *   Pointer to sample resource.
10740  * @parm[in, out] dev_flow
10741  *   Pointer to the dev_flow.
10742  * @param[out] error
10743  *   pointer to error structure.
10744  *
10745  * @return
10746  *   0 on success otherwise -errno and errno is set.
10747  */
10748 static int
10749 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10750                          struct mlx5_flow_dv_sample_resource *resource,
10751                          struct mlx5_flow *dev_flow,
10752                          struct rte_flow_error *error)
10753 {
10754         struct mlx5_flow_dv_sample_resource *cache_resource;
10755         struct mlx5_cache_entry *entry;
10756         struct mlx5_priv *priv = dev->data->dev_private;
10757         struct mlx5_flow_cb_ctx ctx = {
10758                 .dev = dev,
10759                 .error = error,
10760                 .data = resource,
10761         };
10762
10763         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10764         if (!entry)
10765                 return -rte_errno;
10766         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10767         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10768         dev_flow->dv.sample_res = cache_resource;
10769         return 0;
10770 }
10771
10772 int
10773 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10774                             struct mlx5_cache_entry *entry, void *cb_ctx)
10775 {
10776         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10777         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10778         struct rte_eth_dev *dev = ctx->dev;
10779         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10780                         container_of(entry, typeof(*cache_resource), entry);
10781         uint32_t idx = 0;
10782
10783         if (resource->num_of_dest == cache_resource->num_of_dest &&
10784             resource->ft_type == cache_resource->ft_type &&
10785             !memcmp((void *)cache_resource->sample_act,
10786                     (void *)resource->sample_act,
10787                    (resource->num_of_dest *
10788                    sizeof(struct mlx5_flow_sub_actions_list)))) {
10789                 /*
10790                  * Existing sample action should release the prepared
10791                  * sub-actions reference counter.
10792                  */
10793                 for (idx = 0; idx < resource->num_of_dest; idx++)
10794                         flow_dv_sample_sub_actions_release(dev,
10795                                         &resource->sample_idx[idx]);
10796                 return 0;
10797         }
10798         return 1;
10799 }
10800
/*
 * Cache "create" callback for destination array resources: allocates the
 * resource from the indexed pool, builds one mlx5dv destination attribute
 * per sub-action and creates the DR destination array action.
 */
struct mlx5_cache_entry *
flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_dest_array_resource *cache_resource;
	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx = 0, res_idx = 0;
	struct rte_flow_error *error = ctx->error;
	uint64_t action_flags;
	int ret;

	/* Register new destination array resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
					    &res_idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*cache_resource = *resource;
	/* Select the DR domain matching the resource's table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Translate each sub-action into an mlx5dv destination attribute. */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
				 mlx5_malloc(MLX5_MEM_ZERO,
				 sizeof(struct mlx5dv_dr_action_dest_attr),
				 0, SOCKET_ID_ANY);
		if (!dest_attr[idx]) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot allocate resource memory");
			goto error;
		}
		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
		sample_act = &resource->sample_act[idx];
		action_flags = sample_act->action_flags;
		switch (action_flags) {
		case MLX5_FLOW_ACTION_QUEUE:
			dest_attr[idx]->dest = sample_act->dr_queue_action;
			break;
		case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
			/* Encap + port redirect uses a "reformat" dest. */
			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
			dest_attr[idx]->dest_reformat->reformat =
					sample_act->dr_encap_action;
			dest_attr[idx]->dest_reformat->dest =
					sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_PORT_ID:
			dest_attr[idx]->dest = sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_JUMP:
			dest_attr[idx]->dest = sample_act->dr_jump_action;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   NULL,
					   "unsupported actions type");
			goto error;
		}
	}
	/* Create a dest array action. */
	ret = mlx5_os_flow_dr_create_flow_action_dest_array
						(domain,
						 cache_resource->num_of_dest,
						 dest_attr,
						 &cache_resource->action);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create destination array action");
		goto error;
	}
	cache_resource->idx = res_idx;
	cache_resource->dev = dev;
	/* The attribute copies are needed only during creation. */
	for (idx = 0; idx < resource->num_of_dest; idx++)
		mlx5_free(dest_attr[idx]);
	return &cache_resource->entry;
error:
	/* Drop sub-action references and free whatever was allocated. */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		flow_dv_sample_sub_actions_release(dev,
				&cache_resource->sample_idx[idx]);
		if (dest_attr[idx])
			mlx5_free(dest_attr[idx]);
	}

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
	return NULL;
}
10908
10909 /**
10910  * Find existing destination array resource or create and register a new one.
10911  *
10912  * @param[in, out] dev
10913  *   Pointer to rte_eth_dev structure.
10914  * @param[in] resource
10915  *   Pointer to destination array resource.
10916  * @parm[in, out] dev_flow
10917  *   Pointer to the dev_flow.
10918  * @param[out] error
10919  *   pointer to error structure.
10920  *
10921  * @return
10922  *   0 on success otherwise -errno and errno is set.
10923  */
10924 static int
10925 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10926                          struct mlx5_flow_dv_dest_array_resource *resource,
10927                          struct mlx5_flow *dev_flow,
10928                          struct rte_flow_error *error)
10929 {
10930         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10931         struct mlx5_priv *priv = dev->data->dev_private;
10932         struct mlx5_cache_entry *entry;
10933         struct mlx5_flow_cb_ctx ctx = {
10934                 .dev = dev,
10935                 .error = error,
10936                 .data = resource,
10937         };
10938
10939         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10940         if (!entry)
10941                 return -rte_errno;
10942         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10943         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10944         dev_flow->dv.dest_array_res = cache_resource;
10945         return 0;
10946 }
10947
10948 /**
10949  * Convert Sample action to DV specification.
10950  *
10951  * @param[in] dev
10952  *   Pointer to rte_eth_dev structure.
10953  * @param[in] action
10954  *   Pointer to sample action structure.
10955  * @param[in, out] dev_flow
10956  *   Pointer to the mlx5_flow.
10957  * @param[in] attr
10958  *   Pointer to the flow attributes.
10959  * @param[in, out] num_of_dest
10960  *   Pointer to the num of destination.
10961  * @param[in, out] sample_actions
10962  *   Pointer to sample actions list.
10963  * @param[in, out] res
10964  *   Pointer to sample resource.
10965  * @param[out] error
10966  *   Pointer to the error structure.
10967  *
10968  * @return
10969  *   0 on success, a negative errno value otherwise and rte_errno is set.
10970  */
10971 static int
10972 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
10973                                 const struct rte_flow_action_sample *action,
10974                                 struct mlx5_flow *dev_flow,
10975                                 const struct rte_flow_attr *attr,
10976                                 uint32_t *num_of_dest,
10977                                 void **sample_actions,
10978                                 struct mlx5_flow_dv_sample_resource *res,
10979                                 struct rte_flow_error *error)
10980 {
10981         struct mlx5_priv *priv = dev->data->dev_private;
10982         const struct rte_flow_action *sub_actions;
10983         struct mlx5_flow_sub_actions_list *sample_act;
10984         struct mlx5_flow_sub_actions_idx *sample_idx;
10985         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10986         struct rte_flow *flow = dev_flow->flow;
10987         struct mlx5_flow_rss_desc *rss_desc;
10988         uint64_t action_flags = 0;
10989
10990         MLX5_ASSERT(wks);
10991         rss_desc = &wks->rss_desc;
10992         sample_act = &res->sample_act;
10993         sample_idx = &res->sample_idx;
10994         res->ratio = action->ratio;
10995         sub_actions = action->actions;
10996         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
10997                 int type = sub_actions->type;
10998                 uint32_t pre_rix = 0;
10999                 void *pre_r;
11000                 switch (type) {
11001                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11002                 {
11003                         const struct rte_flow_action_queue *queue;
11004                         struct mlx5_hrxq *hrxq;
11005                         uint32_t hrxq_idx;
11006
11007                         queue = sub_actions->conf;
11008                         rss_desc->queue_num = 1;
11009                         rss_desc->queue[0] = queue->index;
11010                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11011                                                     rss_desc, &hrxq_idx);
11012                         if (!hrxq)
11013                                 return rte_flow_error_set
11014                                         (error, rte_errno,
11015                                          RTE_FLOW_ERROR_TYPE_ACTION,
11016                                          NULL,
11017                                          "cannot create fate queue");
11018                         sample_act->dr_queue_action = hrxq->action;
11019                         sample_idx->rix_hrxq = hrxq_idx;
11020                         sample_actions[sample_act->actions_num++] =
11021                                                 hrxq->action;
11022                         (*num_of_dest)++;
11023                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11024                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11025                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11026                         dev_flow->handle->fate_action =
11027                                         MLX5_FLOW_FATE_QUEUE;
11028                         break;
11029                 }
11030                 case RTE_FLOW_ACTION_TYPE_RSS:
11031                 {
11032                         struct mlx5_hrxq *hrxq;
11033                         uint32_t hrxq_idx;
11034                         const struct rte_flow_action_rss *rss;
11035                         const uint8_t *rss_key;
11036
11037                         rss = sub_actions->conf;
11038                         memcpy(rss_desc->queue, rss->queue,
11039                                rss->queue_num * sizeof(uint16_t));
11040                         rss_desc->queue_num = rss->queue_num;
11041                         /* NULL RSS key indicates default RSS key. */
11042                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11043                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11044                         /*
11045                          * rss->level and rss.types should be set in advance
11046                          * when expanding items for RSS.
11047                          */
11048                         flow_dv_hashfields_set(dev_flow, rss_desc);
11049                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11050                                                     rss_desc, &hrxq_idx);
11051                         if (!hrxq)
11052                                 return rte_flow_error_set
11053                                         (error, rte_errno,
11054                                          RTE_FLOW_ERROR_TYPE_ACTION,
11055                                          NULL,
11056                                          "cannot create fate queue");
11057                         sample_act->dr_queue_action = hrxq->action;
11058                         sample_idx->rix_hrxq = hrxq_idx;
11059                         sample_actions[sample_act->actions_num++] =
11060                                                 hrxq->action;
11061                         (*num_of_dest)++;
11062                         action_flags |= MLX5_FLOW_ACTION_RSS;
11063                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11064                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11065                         dev_flow->handle->fate_action =
11066                                         MLX5_FLOW_FATE_QUEUE;
11067                         break;
11068                 }
11069                 case RTE_FLOW_ACTION_TYPE_MARK:
11070                 {
11071                         uint32_t tag_be = mlx5_flow_mark_set
11072                                 (((const struct rte_flow_action_mark *)
11073                                 (sub_actions->conf))->id);
11074
11075                         dev_flow->handle->mark = 1;
11076                         pre_rix = dev_flow->handle->dvh.rix_tag;
11077                         /* Save the mark resource before sample */
11078                         pre_r = dev_flow->dv.tag_resource;
11079                         if (flow_dv_tag_resource_register(dev, tag_be,
11080                                                   dev_flow, error))
11081                                 return -rte_errno;
11082                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11083                         sample_act->dr_tag_action =
11084                                 dev_flow->dv.tag_resource->action;
11085                         sample_idx->rix_tag =
11086                                 dev_flow->handle->dvh.rix_tag;
11087                         sample_actions[sample_act->actions_num++] =
11088                                                 sample_act->dr_tag_action;
11089                         /* Recover the mark resource after sample */
11090                         dev_flow->dv.tag_resource = pre_r;
11091                         dev_flow->handle->dvh.rix_tag = pre_rix;
11092                         action_flags |= MLX5_FLOW_ACTION_MARK;
11093                         break;
11094                 }
11095                 case RTE_FLOW_ACTION_TYPE_COUNT:
11096                 {
11097                         if (!flow->counter) {
11098                                 flow->counter =
11099                                         flow_dv_translate_create_counter(dev,
11100                                                 dev_flow, sub_actions->conf,
11101                                                 0);
11102                                 if (!flow->counter)
11103                                         return rte_flow_error_set
11104                                                 (error, rte_errno,
11105                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11106                                                 NULL,
11107                                                 "cannot create counter"
11108                                                 " object.");
11109                         }
11110                         sample_act->dr_cnt_action =
11111                                   (flow_dv_counter_get_by_idx(dev,
11112                                   flow->counter, NULL))->action;
11113                         sample_actions[sample_act->actions_num++] =
11114                                                 sample_act->dr_cnt_action;
11115                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11116                         break;
11117                 }
11118                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11119                 {
11120                         struct mlx5_flow_dv_port_id_action_resource
11121                                         port_id_resource;
11122                         uint32_t port_id = 0;
11123
11124                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11125                         /* Save the port id resource before sample */
11126                         pre_rix = dev_flow->handle->rix_port_id_action;
11127                         pre_r = dev_flow->dv.port_id_action;
11128                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11129                                                              &port_id, error))
11130                                 return -rte_errno;
11131                         port_id_resource.port_id = port_id;
11132                         if (flow_dv_port_id_action_resource_register
11133                             (dev, &port_id_resource, dev_flow, error))
11134                                 return -rte_errno;
11135                         sample_act->dr_port_id_action =
11136                                 dev_flow->dv.port_id_action->action;
11137                         sample_idx->rix_port_id_action =
11138                                 dev_flow->handle->rix_port_id_action;
11139                         sample_actions[sample_act->actions_num++] =
11140                                                 sample_act->dr_port_id_action;
11141                         /* Recover the port id resource after sample */
11142                         dev_flow->dv.port_id_action = pre_r;
11143                         dev_flow->handle->rix_port_id_action = pre_rix;
11144                         (*num_of_dest)++;
11145                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11146                         break;
11147                 }
11148                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11149                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11150                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11151                         /* Save the encap resource before sample */
11152                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11153                         pre_r = dev_flow->dv.encap_decap;
11154                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11155                                                            dev_flow,
11156                                                            attr->transfer,
11157                                                            error))
11158                                 return -rte_errno;
11159                         sample_act->dr_encap_action =
11160                                 dev_flow->dv.encap_decap->action;
11161                         sample_idx->rix_encap_decap =
11162                                 dev_flow->handle->dvh.rix_encap_decap;
11163                         sample_actions[sample_act->actions_num++] =
11164                                                 sample_act->dr_encap_action;
11165                         /* Recover the encap resource after sample */
11166                         dev_flow->dv.encap_decap = pre_r;
11167                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11168                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11169                         break;
11170                 default:
11171                         return rte_flow_error_set(error, EINVAL,
11172                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11173                                 NULL,
11174                                 "Not support for sampler action");
11175                 }
11176         }
11177         sample_act->action_flags = action_flags;
11178         res->ft_id = dev_flow->dv.group;
11179         if (attr->transfer) {
11180                 union {
11181                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11182                         uint64_t set_action;
11183                 } action_ctx = { .set_action = 0 };
11184
11185                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11186                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11187                          MLX5_MODIFICATION_TYPE_SET);
11188                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11189                          MLX5_MODI_META_REG_C_0);
11190                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11191                          priv->vport_meta_tag);
11192                 res->set_action = action_ctx.set_action;
11193         } else if (attr->ingress) {
11194                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11195         } else {
11196                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11197         }
11198         return 0;
11199 }
11200
11201 /**
11202  * Convert Sample action to DV specification.
11203  *
11204  * @param[in] dev
11205  *   Pointer to rte_eth_dev structure.
11206  * @param[in, out] dev_flow
11207  *   Pointer to the mlx5_flow.
11208  * @param[in] num_of_dest
11209  *   The num of destination.
11210  * @param[in, out] res
11211  *   Pointer to sample resource.
11212  * @param[in, out] mdest_res
11213  *   Pointer to destination array resource.
11214  * @param[in] sample_actions
11215  *   Pointer to sample path actions list.
11216  * @param[in] action_flags
11217  *   Holds the actions detected until now.
11218  * @param[out] error
11219  *   Pointer to the error structure.
11220  *
11221  * @return
11222  *   0 on success, a negative errno value otherwise and rte_errno is set.
11223  */
11224 static int
11225 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11226                              struct mlx5_flow *dev_flow,
11227                              uint32_t num_of_dest,
11228                              struct mlx5_flow_dv_sample_resource *res,
11229                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11230                              void **sample_actions,
11231                              uint64_t action_flags,
11232                              struct rte_flow_error *error)
11233 {
11234         /* update normal path action resource into last index of array */
11235         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11236         struct mlx5_flow_sub_actions_list *sample_act =
11237                                         &mdest_res->sample_act[dest_index];
11238         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11239         struct mlx5_flow_rss_desc *rss_desc;
11240         uint32_t normal_idx = 0;
11241         struct mlx5_hrxq *hrxq;
11242         uint32_t hrxq_idx;
11243
11244         MLX5_ASSERT(wks);
11245         rss_desc = &wks->rss_desc;
11246         if (num_of_dest > 1) {
11247                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11248                         /* Handle QP action for mirroring */
11249                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11250                                                     rss_desc, &hrxq_idx);
11251                         if (!hrxq)
11252                                 return rte_flow_error_set
11253                                      (error, rte_errno,
11254                                       RTE_FLOW_ERROR_TYPE_ACTION,
11255                                       NULL,
11256                                       "cannot create rx queue");
11257                         normal_idx++;
11258                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11259                         sample_act->dr_queue_action = hrxq->action;
11260                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11261                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11262                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11263                 }
11264                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11265                         normal_idx++;
11266                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11267                                 dev_flow->handle->dvh.rix_encap_decap;
11268                         sample_act->dr_encap_action =
11269                                 dev_flow->dv.encap_decap->action;
11270                         dev_flow->handle->dvh.rix_encap_decap = 0;
11271                 }
11272                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11273                         normal_idx++;
11274                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11275                                 dev_flow->handle->rix_port_id_action;
11276                         sample_act->dr_port_id_action =
11277                                 dev_flow->dv.port_id_action->action;
11278                         dev_flow->handle->rix_port_id_action = 0;
11279                 }
11280                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11281                         normal_idx++;
11282                         mdest_res->sample_idx[dest_index].rix_jump =
11283                                 dev_flow->handle->rix_jump;
11284                         sample_act->dr_jump_action =
11285                                 dev_flow->dv.jump->action;
11286                         dev_flow->handle->rix_jump = 0;
11287                 }
11288                 sample_act->actions_num = normal_idx;
11289                 /* update sample action resource into first index of array */
11290                 mdest_res->ft_type = res->ft_type;
11291                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11292                                 sizeof(struct mlx5_flow_sub_actions_idx));
11293                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11294                                 sizeof(struct mlx5_flow_sub_actions_list));
11295                 mdest_res->num_of_dest = num_of_dest;
11296                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11297                                                          dev_flow, error))
11298                         return rte_flow_error_set(error, EINVAL,
11299                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11300                                                   NULL, "can't create sample "
11301                                                   "action");
11302         } else {
11303                 res->sub_actions = sample_actions;
11304                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11305                         return rte_flow_error_set(error, EINVAL,
11306                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11307                                                   NULL,
11308                                                   "can't create sample action");
11309         }
11310         return 0;
11311 }
11312
11313 /**
11314  * Remove an ASO age action from age actions list.
11315  *
11316  * @param[in] dev
11317  *   Pointer to the Ethernet device structure.
11318  * @param[in] age
11319  *   Pointer to the aso age action handler.
11320  */
11321 static void
11322 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11323                                 struct mlx5_aso_age_action *age)
11324 {
11325         struct mlx5_age_info *age_info;
11326         struct mlx5_age_param *age_param = &age->age_params;
11327         struct mlx5_priv *priv = dev->data->dev_private;
11328         uint16_t expected = AGE_CANDIDATE;
11329
11330         age_info = GET_PORT_AGE_INFO(priv);
11331         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11332                                          AGE_FREE, false, __ATOMIC_RELAXED,
11333                                          __ATOMIC_RELAXED)) {
11334                 /**
11335                  * We need the lock even it is age timeout,
11336                  * since age action may still in process.
11337                  */
11338                 rte_spinlock_lock(&age_info->aged_sl);
11339                 LIST_REMOVE(age, next);
11340                 rte_spinlock_unlock(&age_info->aged_sl);
11341                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11342         }
11343 }
11344
11345 /**
11346  * Release an ASO age action.
11347  *
11348  * @param[in] dev
11349  *   Pointer to the Ethernet device structure.
11350  * @param[in] age_idx
11351  *   Index of ASO age action to release.
11352  * @param[in] flow
11353  *   True if the release operation is during flow destroy operation.
11354  *   False if the release operation is during action destroy operation.
11355  *
11356  * @return
11357  *   0 when age action was removed, otherwise the number of references.
11358  */
11359 static int
11360 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11361 {
11362         struct mlx5_priv *priv = dev->data->dev_private;
11363         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11364         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11365         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11366
11367         if (!ret) {
11368                 flow_dv_aso_age_remove_from_age(dev, age);
11369                 rte_spinlock_lock(&mng->free_sl);
11370                 LIST_INSERT_HEAD(&mng->free, age, next);
11371                 rte_spinlock_unlock(&mng->free_sl);
11372         }
11373         return ret;
11374 }
11375
11376 /**
11377  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11378  *
11379  * @param[in] dev
11380  *   Pointer to the Ethernet device structure.
11381  *
11382  * @return
11383  *   0 on success, otherwise negative errno value and rte_errno is set.
11384  */
11385 static int
11386 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11387 {
11388         struct mlx5_priv *priv = dev->data->dev_private;
11389         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11390         void *old_pools = mng->pools;
11391         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11392         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11393         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11394
11395         if (!pools) {
11396                 rte_errno = ENOMEM;
11397                 return -ENOMEM;
11398         }
11399         if (old_pools) {
11400                 memcpy(pools, old_pools,
11401                        mng->n * sizeof(struct mlx5_flow_counter_pool *));
11402                 mlx5_free(old_pools);
11403         } else {
11404                 /* First ASO flow hit allocation - starting ASO data-path. */
11405                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11406
11407                 if (ret) {
11408                         mlx5_free(pools);
11409                         return ret;
11410                 }
11411         }
11412         mng->n = resize;
11413         mng->pools = pools;
11414         return 0;
11415 }
11416
11417 /**
11418  * Create and initialize a new ASO aging pool.
11419  *
11420  * @param[in] dev
11421  *   Pointer to the Ethernet device structure.
11422  * @param[out] age_free
11423  *   Where to put the pointer of a new age action.
11424  *
11425  * @return
11426  *   The age actions pool pointer and @p age_free is set on success,
11427  *   NULL otherwise and rte_errno is set.
11428  */
11429 static struct mlx5_aso_age_pool *
11430 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11431                         struct mlx5_aso_age_action **age_free)
11432 {
11433         struct mlx5_priv *priv = dev->data->dev_private;
11434         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11435         struct mlx5_aso_age_pool *pool = NULL;
11436         struct mlx5_devx_obj *obj = NULL;
11437         uint32_t i;
11438
11439         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11440                                                     priv->sh->pdn);
11441         if (!obj) {
11442                 rte_errno = ENODATA;
11443                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11444                 return NULL;
11445         }
11446         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11447         if (!pool) {
11448                 claim_zero(mlx5_devx_cmd_destroy(obj));
11449                 rte_errno = ENOMEM;
11450                 return NULL;
11451         }
11452         pool->flow_hit_aso_obj = obj;
11453         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11454         rte_spinlock_lock(&mng->resize_sl);
11455         pool->index = mng->next;
11456         /* Resize pools array if there is no room for the new pool in it. */
11457         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11458                 claim_zero(mlx5_devx_cmd_destroy(obj));
11459                 mlx5_free(pool);
11460                 rte_spinlock_unlock(&mng->resize_sl);
11461                 return NULL;
11462         }
11463         mng->pools[pool->index] = pool;
11464         mng->next++;
11465         rte_spinlock_unlock(&mng->resize_sl);
11466         /* Assign the first action in the new pool, the rest go to free list. */
11467         *age_free = &pool->actions[0];
11468         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11469                 pool->actions[i].offset = i;
11470                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11471         }
11472         return pool;
11473 }
11474
11475 /**
11476  * Allocate a ASO aging bit.
11477  *
11478  * @param[in] dev
11479  *   Pointer to the Ethernet device structure.
11480  * @param[out] error
11481  *   Pointer to the error structure.
11482  *
11483  * @return
11484  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11485  */
11486 static uint32_t
11487 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11488 {
11489         struct mlx5_priv *priv = dev->data->dev_private;
11490         const struct mlx5_aso_age_pool *pool;
11491         struct mlx5_aso_age_action *age_free = NULL;
11492         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11493
11494         MLX5_ASSERT(mng);
11495         /* Try to get the next free age action bit. */
11496         rte_spinlock_lock(&mng->free_sl);
11497         age_free = LIST_FIRST(&mng->free);
11498         if (age_free) {
11499                 LIST_REMOVE(age_free, next);
11500         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11501                 rte_spinlock_unlock(&mng->free_sl);
11502                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11503                                    NULL, "failed to create ASO age pool");
11504                 return 0; /* 0 is an error. */
11505         }
11506         rte_spinlock_unlock(&mng->free_sl);
11507         pool = container_of
11508           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11509                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11510                                                                        actions);
11511         if (!age_free->dr_action) {
11512                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11513                                                  error);
11514
11515                 if (reg_c < 0) {
11516                         rte_flow_error_set(error, rte_errno,
11517                                            RTE_FLOW_ERROR_TYPE_ACTION,
11518                                            NULL, "failed to get reg_c "
11519                                            "for ASO flow hit");
11520                         return 0; /* 0 is an error. */
11521                 }
11522 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11523                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11524                                 (priv->sh->rx_domain,
11525                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11526                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11527                                  (reg_c - REG_C_0));
11528 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11529                 if (!age_free->dr_action) {
11530                         rte_errno = errno;
11531                         rte_spinlock_lock(&mng->free_sl);
11532                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11533                         rte_spinlock_unlock(&mng->free_sl);
11534                         rte_flow_error_set(error, rte_errno,
11535                                            RTE_FLOW_ERROR_TYPE_ACTION,
11536                                            NULL, "failed to create ASO "
11537                                            "flow hit action");
11538                         return 0; /* 0 is an error. */
11539                 }
11540         }
11541         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11542         return pool->index | ((age_free->offset + 1) << 16);
11543 }
11544
11545 /**
11546  * Initialize flow ASO age parameters.
11547  *
11548  * @param[in] dev
11549  *   Pointer to rte_eth_dev structure.
11550  * @param[in] age_idx
11551  *   Index of ASO age action.
11552  * @param[in] context
11553  *   Pointer to flow counter age context.
11554  * @param[in] timeout
11555  *   Aging timeout in seconds.
11556  *
11557  */
11558 static void
11559 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11560                             uint32_t age_idx,
11561                             void *context,
11562                             uint32_t timeout)
11563 {
11564         struct mlx5_aso_age_action *aso_age;
11565
11566         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11567         MLX5_ASSERT(aso_age);
11568         aso_age->age_params.context = context;
11569         aso_age->age_params.timeout = timeout;
11570         aso_age->age_params.port_id = dev->data->port_id;
11571         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11572                          __ATOMIC_RELAXED);
11573         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11574                          __ATOMIC_RELAXED);
11575 }
11576
11577 static void
11578 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11579                                const struct rte_flow_item_integrity *value,
11580                                void *headers_m, void *headers_v)
11581 {
11582         if (mask->l4_ok) {
11583                 /* application l4_ok filter aggregates all hardware l4 filters
11584                  * therefore hw l4_checksum_ok must be implicitly added here.
11585                  */
11586                 struct rte_flow_item_integrity local_item;
11587
11588                 local_item.l4_csum_ok = 1;
11589                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11590                          local_item.l4_csum_ok);
11591                 if (value->l4_ok) {
11592                         /* application l4_ok = 1 matches sets both hw flags
11593                          * l4_ok and l4_checksum_ok flags to 1.
11594                          */
11595                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11596                                  l4_checksum_ok, local_item.l4_csum_ok);
11597                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11598                                  mask->l4_ok);
11599                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11600                                  value->l4_ok);
11601                 } else {
11602                         /* application l4_ok = 0 matches on hw flag
11603                          * l4_checksum_ok = 0 only.
11604                          */
11605                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11606                                  l4_checksum_ok, 0);
11607                 }
11608         } else if (mask->l4_csum_ok) {
11609                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11610                          mask->l4_csum_ok);
11611                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11612                          value->l4_csum_ok);
11613         }
11614 }
11615
11616 static void
11617 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
11618                                const struct rte_flow_item_integrity *value,
11619                                void *headers_m, void *headers_v,
11620                                bool is_ipv4)
11621 {
11622         if (mask->l3_ok) {
11623                 /* application l3_ok filter aggregates all hardware l3 filters
11624                  * therefore hw ipv4_checksum_ok must be implicitly added here.
11625                  */
11626                 struct rte_flow_item_integrity local_item;
11627
11628                 local_item.ipv4_csum_ok = !!is_ipv4;
11629                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11630                          local_item.ipv4_csum_ok);
11631                 if (value->l3_ok) {
11632                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11633                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
11634                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
11635                                  mask->l3_ok);
11636                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
11637                                  value->l3_ok);
11638                 } else {
11639                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11640                                  ipv4_checksum_ok, 0);
11641                 }
11642         } else if (mask->ipv4_csum_ok) {
11643                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11644                          mask->ipv4_csum_ok);
11645                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11646                          value->ipv4_csum_ok);
11647         }
11648 }
11649
/*
 * Translate the RTE_FLOW_ITEM_TYPE_INTEGRITY item into the DV matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask) buffer.
 * @param[in, out] key
 *   Flow value buffer.
 * @param[in] head_item
 *   First item of the pattern, used to locate the tunnel item.
 * @param[in] integrity_item
 *   The integrity item to translate.
 */
static void
flow_dv_translate_item_integrity(void *matcher, void *key,
				 const struct rte_flow_item *head_item,
				 const struct rte_flow_item *integrity_item)
{
	const struct rte_flow_item_integrity *mask = integrity_item->mask;
	const struct rte_flow_item_integrity *value = integrity_item->spec;
	const struct rte_flow_item *tunnel_item, *end_item, *item;
	void *headers_m;
	void *headers_v;
	uint32_t l3_protocol;

	/* Nothing to match without a spec. */
	if (!value)
		return;
	if (!mask)
		mask = &rte_flow_item_integrity_mask;
	/* level > 1 targets the inner (post-tunnel) headers. */
	if (value->level > 1) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	tunnel_item = mlx5_flow_find_tunnel_item(head_item);
	/* Pick the pattern sub-range to scan for the L3 protocol. */
	if (value->level > 1) {
		/* tunnel item was verified during the item validation */
		item = tunnel_item;
		end_item = mlx5_find_end_item(tunnel_item);
	} else {
		item = head_item;
		end_item = tunnel_item ? tunnel_item :
			   mlx5_find_end_item(integrity_item);
	}
	/* The L3 protocol is only needed when l3_ok is being matched. */
	l3_protocol = mask->l3_ok ?
		      mlx5_flow_locate_proto_l3(&item, end_item) : 0;
	flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
				       l3_protocol == RTE_ETHER_TYPE_IPV4);
	flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
}
11691
11692 /**
11693  * Prepares DV flow counter with aging configuration.
11694  * Gets it by index when exists, creates a new one when doesn't.
11695  *
11696  * @param[in] dev
11697  *   Pointer to rte_eth_dev structure.
11698  * @param[in] dev_flow
11699  *   Pointer to the mlx5_flow.
11700  * @param[in, out] flow
11701  *   Pointer to the sub flow.
11702  * @param[in] count
11703  *   Pointer to the counter action configuration.
11704  * @param[in] age
11705  *   Pointer to the aging action configuration.
11706  * @param[out] error
11707  *   Pointer to the error structure.
11708  *
11709  * @return
11710  *   Pointer to the counter, NULL otherwise.
11711  */
11712 static struct mlx5_flow_counter *
11713 flow_dv_prepare_counter(struct rte_eth_dev *dev,
11714                         struct mlx5_flow *dev_flow,
11715                         struct rte_flow *flow,
11716                         const struct rte_flow_action_count *count,
11717                         const struct rte_flow_action_age *age,
11718                         struct rte_flow_error *error)
11719 {
11720         if (!flow->counter) {
11721                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
11722                                                                  count, age);
11723                 if (!flow->counter) {
11724                         rte_flow_error_set(error, rte_errno,
11725                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11726                                            "cannot create counter object.");
11727                         return NULL;
11728                 }
11729         }
11730         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11731 }
11732
11733 /*
11734  * Release an ASO CT action by its own device.
11735  *
11736  * @param[in] dev
11737  *   Pointer to the Ethernet device structure.
11738  * @param[in] idx
11739  *   Index of ASO CT action to release.
11740  *
11741  * @return
11742  *   0 when CT action was removed, otherwise the number of references.
11743  */
11744 static inline int
11745 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
11746 {
11747         struct mlx5_priv *priv = dev->data->dev_private;
11748         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11749         uint32_t ret;
11750         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
11751         enum mlx5_aso_ct_state state =
11752                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
11753
11754         /* Cannot release when CT is in the ASO SQ. */
11755         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
11756                 return -1;
11757         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
11758         if (!ret) {
11759                 if (ct->dr_action_orig) {
11760 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11761                         claim_zero(mlx5_glue->destroy_flow_action
11762                                         (ct->dr_action_orig));
11763 #endif
11764                         ct->dr_action_orig = NULL;
11765                 }
11766                 if (ct->dr_action_rply) {
11767 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11768                         claim_zero(mlx5_glue->destroy_flow_action
11769                                         (ct->dr_action_rply));
11770 #endif
11771                         ct->dr_action_rply = NULL;
11772                 }
11773                 /* Clear the state to free, no need in 1st allocation. */
11774                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
11775                 rte_spinlock_lock(&mng->ct_sl);
11776                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
11777                 rte_spinlock_unlock(&mng->ct_sl);
11778         }
11779         return (int)ret;
11780 }
11781
11782 static inline int
11783 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
11784 {
11785         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
11786         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
11787         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
11788         RTE_SET_USED(dev);
11789
11790         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
11791         if (dev->data->dev_started != 1)
11792                 return -1;
11793         return flow_dv_aso_ct_dev_release(owndev, idx);
11794 }
11795
11796 /*
11797  * Resize the ASO CT pools array by 64 pools.
11798  *
11799  * @param[in] dev
11800  *   Pointer to the Ethernet device structure.
11801  *
11802  * @return
11803  *   0 on success, otherwise negative errno value and rte_errno is set.
11804  */
11805 static int
11806 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
11807 {
11808         struct mlx5_priv *priv = dev->data->dev_private;
11809         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11810         void *old_pools = mng->pools;
11811         /* Magic number now, need a macro. */
11812         uint32_t resize = mng->n + 64;
11813         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
11814         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11815
11816         if (!pools) {
11817                 rte_errno = ENOMEM;
11818                 return -rte_errno;
11819         }
11820         rte_rwlock_write_lock(&mng->resize_rwl);
11821         /* ASO SQ/QP was already initialized in the startup. */
11822         if (old_pools) {
11823                 /* Realloc could be an alternative choice. */
11824                 rte_memcpy(pools, old_pools,
11825                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
11826                 mlx5_free(old_pools);
11827         }
11828         mng->n = resize;
11829         mng->pools = pools;
11830         rte_rwlock_write_unlock(&mng->resize_rwl);
11831         return 0;
11832 }
11833
11834 /*
11835  * Create and initialize a new ASO CT pool.
11836  *
11837  * @param[in] dev
11838  *   Pointer to the Ethernet device structure.
11839  * @param[out] ct_free
11840  *   Where to put the pointer of a new CT action.
11841  *
11842  * @return
11843  *   The CT actions pool pointer and @p ct_free is set on success,
11844  *   NULL otherwise and rte_errno is set.
11845  */
11846 static struct mlx5_aso_ct_pool *
11847 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
11848                        struct mlx5_aso_ct_action **ct_free)
11849 {
11850         struct mlx5_priv *priv = dev->data->dev_private;
11851         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11852         struct mlx5_aso_ct_pool *pool = NULL;
11853         struct mlx5_devx_obj *obj = NULL;
11854         uint32_t i;
11855         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
11856
11857         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
11858                                                 priv->sh->pdn, log_obj_size);
11859         if (!obj) {
11860                 rte_errno = ENODATA;
11861                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
11862                 return NULL;
11863         }
11864         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11865         if (!pool) {
11866                 rte_errno = ENOMEM;
11867                 claim_zero(mlx5_devx_cmd_destroy(obj));
11868                 return NULL;
11869         }
11870         pool->devx_obj = obj;
11871         pool->index = mng->next;
11872         /* Resize pools array if there is no room for the new pool in it. */
11873         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
11874                 claim_zero(mlx5_devx_cmd_destroy(obj));
11875                 mlx5_free(pool);
11876                 return NULL;
11877         }
11878         mng->pools[pool->index] = pool;
11879         mng->next++;
11880         /* Assign the first action in the new pool, the rest go to free list. */
11881         *ct_free = &pool->actions[0];
11882         /* Lock outside, the list operation is safe here. */
11883         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
11884                 /* refcnt is 0 when allocating the memory. */
11885                 pool->actions[i].offset = i;
11886                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
11887         }
11888         return pool;
11889 }
11890
11891 /*
11892  * Allocate a ASO CT action from free list.
11893  *
11894  * @param[in] dev
11895  *   Pointer to the Ethernet device structure.
11896  * @param[out] error
11897  *   Pointer to the error structure.
11898  *
11899  * @return
11900  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
11901  */
11902 static uint32_t
11903 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11904 {
11905         struct mlx5_priv *priv = dev->data->dev_private;
11906         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11907         struct mlx5_aso_ct_action *ct = NULL;
11908         struct mlx5_aso_ct_pool *pool;
11909         uint8_t reg_c;
11910         uint32_t ct_idx;
11911
11912         MLX5_ASSERT(mng);
11913         if (!priv->config.devx) {
11914                 rte_errno = ENOTSUP;
11915                 return 0;
11916         }
11917         /* Get a free CT action, if no, a new pool will be created. */
11918         rte_spinlock_lock(&mng->ct_sl);
11919         ct = LIST_FIRST(&mng->free_cts);
11920         if (ct) {
11921                 LIST_REMOVE(ct, next);
11922         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
11923                 rte_spinlock_unlock(&mng->ct_sl);
11924                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11925                                    NULL, "failed to create ASO CT pool");
11926                 return 0;
11927         }
11928         rte_spinlock_unlock(&mng->ct_sl);
11929         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
11930         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
11931         /* 0: inactive, 1: created, 2+: used by flows. */
11932         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
11933         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
11934         if (!ct->dr_action_orig) {
11935 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11936                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
11937                         (priv->sh->rx_domain, pool->devx_obj->obj,
11938                          ct->offset,
11939                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
11940                          reg_c - REG_C_0);
11941 #else
11942                 RTE_SET_USED(reg_c);
11943 #endif
11944                 if (!ct->dr_action_orig) {
11945                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11946                         rte_flow_error_set(error, rte_errno,
11947                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11948                                            "failed to create ASO CT action");
11949                         return 0;
11950                 }
11951         }
11952         if (!ct->dr_action_rply) {
11953 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11954                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
11955                         (priv->sh->rx_domain, pool->devx_obj->obj,
11956                          ct->offset,
11957                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
11958                          reg_c - REG_C_0);
11959 #endif
11960                 if (!ct->dr_action_rply) {
11961                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11962                         rte_flow_error_set(error, rte_errno,
11963                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11964                                            "failed to create ASO CT action");
11965                         return 0;
11966                 }
11967         }
11968         return ct_idx;
11969 }
11970
11971 /*
11972  * Create a conntrack object with context and actions by using ASO mechanism.
11973  *
11974  * @param[in] dev
11975  *   Pointer to rte_eth_dev structure.
11976  * @param[in] pro
11977  *   Pointer to conntrack information profile.
11978  * @param[out] error
11979  *   Pointer to the error structure.
11980  *
11981  * @return
11982  *   Index to conntrack object on success, 0 otherwise.
11983  */
11984 static uint32_t
11985 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
11986                                    const struct rte_flow_action_conntrack *pro,
11987                                    struct rte_flow_error *error)
11988 {
11989         struct mlx5_priv *priv = dev->data->dev_private;
11990         struct mlx5_dev_ctx_shared *sh = priv->sh;
11991         struct mlx5_aso_ct_action *ct;
11992         uint32_t idx;
11993
11994         if (!sh->ct_aso_en)
11995                 return rte_flow_error_set(error, ENOTSUP,
11996                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11997                                           "Connection is not supported");
11998         idx = flow_dv_aso_ct_alloc(dev, error);
11999         if (!idx)
12000                 return rte_flow_error_set(error, rte_errno,
12001                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12002                                           "Failed to allocate CT object");
12003         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12004         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12005                 return rte_flow_error_set(error, EBUSY,
12006                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12007                                           "Failed to update CT");
12008         ct->is_original = !!pro->is_original_dir;
12009         ct->peer = pro->peer_port;
12010         return idx;
12011 }
12012
12013 /**
12014  * Fill the flow with DV spec, lock free
12015  * (mutex should be acquired by caller).
12016  *
12017  * @param[in] dev
12018  *   Pointer to rte_eth_dev structure.
12019  * @param[in, out] dev_flow
12020  *   Pointer to the sub flow.
12021  * @param[in] attr
12022  *   Pointer to the flow attributes.
12023  * @param[in] items
12024  *   Pointer to the list of items.
12025  * @param[in] actions
12026  *   Pointer to the list of actions.
12027  * @param[out] error
12028  *   Pointer to the error structure.
12029  *
12030  * @return
12031  *   0 on success, a negative errno value otherwise and rte_errno is set.
12032  */
12033 static int
12034 flow_dv_translate(struct rte_eth_dev *dev,
12035                   struct mlx5_flow *dev_flow,
12036                   const struct rte_flow_attr *attr,
12037                   const struct rte_flow_item items[],
12038                   const struct rte_flow_action actions[],
12039                   struct rte_flow_error *error)
12040 {
12041         struct mlx5_priv *priv = dev->data->dev_private;
12042         struct mlx5_dev_config *dev_conf = &priv->config;
12043         struct rte_flow *flow = dev_flow->flow;
12044         struct mlx5_flow_handle *handle = dev_flow->handle;
12045         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12046         struct mlx5_flow_rss_desc *rss_desc;
12047         uint64_t item_flags = 0;
12048         uint64_t last_item = 0;
12049         uint64_t action_flags = 0;
12050         struct mlx5_flow_dv_matcher matcher = {
12051                 .mask = {
12052                         .size = sizeof(matcher.mask.buf) -
12053                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
12054                 },
12055         };
12056         int actions_n = 0;
12057         bool actions_end = false;
12058         union {
12059                 struct mlx5_flow_dv_modify_hdr_resource res;
12060                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12061                             sizeof(struct mlx5_modification_cmd) *
12062                             (MLX5_MAX_MODIFY_NUM + 1)];
12063         } mhdr_dummy;
12064         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12065         const struct rte_flow_action_count *count = NULL;
12066         const struct rte_flow_action_age *non_shared_age = NULL;
12067         union flow_dv_attr flow_attr = { .attr = 0 };
12068         uint32_t tag_be;
12069         union mlx5_flow_tbl_key tbl_key;
12070         uint32_t modify_action_position = UINT32_MAX;
12071         void *match_mask = matcher.mask.buf;
12072         void *match_value = dev_flow->dv.value.buf;
12073         uint8_t next_protocol = 0xff;
12074         struct rte_vlan_hdr vlan = { 0 };
12075         struct mlx5_flow_dv_dest_array_resource mdest_res;
12076         struct mlx5_flow_dv_sample_resource sample_res;
12077         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12078         const struct rte_flow_action_sample *sample = NULL;
12079         struct mlx5_flow_sub_actions_list *sample_act;
12080         uint32_t sample_act_pos = UINT32_MAX;
12081         uint32_t age_act_pos = UINT32_MAX;
12082         uint32_t num_of_dest = 0;
12083         int tmp_actions_n = 0;
12084         uint32_t table;
12085         int ret = 0;
12086         const struct mlx5_flow_tunnel *tunnel = NULL;
12087         struct flow_grp_info grp_info = {
12088                 .external = !!dev_flow->external,
12089                 .transfer = !!attr->transfer,
12090                 .fdb_def_rule = !!priv->fdb_def_rule,
12091                 .skip_scale = dev_flow->skip_scale &
12092                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12093                 .std_tbl_fix = true,
12094         };
12095         const struct rte_flow_item *head_item = items;
12096
12097         if (!wks)
12098                 return rte_flow_error_set(error, ENOMEM,
12099                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12100                                           NULL,
12101                                           "failed to push flow workspace");
12102         rss_desc = &wks->rss_desc;
12103         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12104         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12105         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12106                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12107         /* update normal path action resource into last index of array */
12108         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12109         if (is_tunnel_offload_active(dev)) {
12110                 if (dev_flow->tunnel) {
12111                         RTE_VERIFY(dev_flow->tof_type ==
12112                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12113                         tunnel = dev_flow->tunnel;
12114                 } else {
12115                         tunnel = mlx5_get_tof(items, actions,
12116                                               &dev_flow->tof_type);
12117                         dev_flow->tunnel = tunnel;
12118                 }
12119                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12120                                         (dev, attr, tunnel, dev_flow->tof_type);
12121         }
12122         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12123                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12124         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12125                                        &grp_info, error);
12126         if (ret)
12127                 return ret;
12128         dev_flow->dv.group = table;
12129         if (attr->transfer)
12130                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12131         /* number of actions must be set to 0 in case of dirty stack. */
12132         mhdr_res->actions_num = 0;
12133         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12134                 /*
12135                  * do not add decap action if match rule drops packet
12136                  * HW rejects rules with decap & drop
12137                  *
12138                  * if tunnel match rule was inserted before matching tunnel set
12139                  * rule flow table used in the match rule must be registered.
12140                  * current implementation handles that in the
12141                  * flow_dv_match_register() at the function end.
12142                  */
12143                 bool add_decap = true;
12144                 const struct rte_flow_action *ptr = actions;
12145
12146                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12147                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12148                                 add_decap = false;
12149                                 break;
12150                         }
12151                 }
12152                 if (add_decap) {
12153                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12154                                                            attr->transfer,
12155                                                            error))
12156                                 return -rte_errno;
12157                         dev_flow->dv.actions[actions_n++] =
12158                                         dev_flow->dv.encap_decap->action;
12159                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12160                 }
12161         }
12162         for (; !actions_end ; actions++) {
12163                 const struct rte_flow_action_queue *queue;
12164                 const struct rte_flow_action_rss *rss;
12165                 const struct rte_flow_action *action = actions;
12166                 const uint8_t *rss_key;
12167                 struct mlx5_flow_tbl_resource *tbl;
12168                 struct mlx5_aso_age_action *age_act;
12169                 struct mlx5_flow_counter *cnt_act;
12170                 uint32_t port_id = 0;
12171                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12172                 int action_type = actions->type;
12173                 const struct rte_flow_action *found_action = NULL;
12174                 uint32_t jump_group = 0;
12175                 uint32_t owner_idx;
12176                 struct mlx5_aso_ct_action *ct;
12177
12178                 if (!mlx5_flow_os_action_supported(action_type))
12179                         return rte_flow_error_set(error, ENOTSUP,
12180                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12181                                                   actions,
12182                                                   "action not supported");
12183                 switch (action_type) {
12184                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12185                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12186                         break;
12187                 case RTE_FLOW_ACTION_TYPE_VOID:
12188                         break;
12189                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12190                         if (flow_dv_translate_action_port_id(dev, action,
12191                                                              &port_id, error))
12192                                 return -rte_errno;
12193                         port_id_resource.port_id = port_id;
12194                         MLX5_ASSERT(!handle->rix_port_id_action);
12195                         if (flow_dv_port_id_action_resource_register
12196                             (dev, &port_id_resource, dev_flow, error))
12197                                 return -rte_errno;
12198                         dev_flow->dv.actions[actions_n++] =
12199                                         dev_flow->dv.port_id_action->action;
12200                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12201                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12202                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12203                         num_of_dest++;
12204                         break;
12205                 case RTE_FLOW_ACTION_TYPE_FLAG:
12206                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12207                         dev_flow->handle->mark = 1;
12208                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12209                                 struct rte_flow_action_mark mark = {
12210                                         .id = MLX5_FLOW_MARK_DEFAULT,
12211                                 };
12212
12213                                 if (flow_dv_convert_action_mark(dev, &mark,
12214                                                                 mhdr_res,
12215                                                                 error))
12216                                         return -rte_errno;
12217                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12218                                 break;
12219                         }
12220                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12221                         /*
12222                          * Only one FLAG or MARK is supported per device flow
12223                          * right now. So the pointer to the tag resource must be
12224                          * zero before the register process.
12225                          */
12226                         MLX5_ASSERT(!handle->dvh.rix_tag);
12227                         if (flow_dv_tag_resource_register(dev, tag_be,
12228                                                           dev_flow, error))
12229                                 return -rte_errno;
12230                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12231                         dev_flow->dv.actions[actions_n++] =
12232                                         dev_flow->dv.tag_resource->action;
12233                         break;
12234                 case RTE_FLOW_ACTION_TYPE_MARK:
12235                         action_flags |= MLX5_FLOW_ACTION_MARK;
12236                         dev_flow->handle->mark = 1;
12237                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12238                                 const struct rte_flow_action_mark *mark =
12239                                         (const struct rte_flow_action_mark *)
12240                                                 actions->conf;
12241
12242                                 if (flow_dv_convert_action_mark(dev, mark,
12243                                                                 mhdr_res,
12244                                                                 error))
12245                                         return -rte_errno;
12246                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12247                                 break;
12248                         }
12249                         /* Fall-through */
12250                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12251                         /* Legacy (non-extensive) MARK action. */
12252                         tag_be = mlx5_flow_mark_set
12253                               (((const struct rte_flow_action_mark *)
12254                                (actions->conf))->id);
12255                         MLX5_ASSERT(!handle->dvh.rix_tag);
12256                         if (flow_dv_tag_resource_register(dev, tag_be,
12257                                                           dev_flow, error))
12258                                 return -rte_errno;
12259                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12260                         dev_flow->dv.actions[actions_n++] =
12261                                         dev_flow->dv.tag_resource->action;
12262                         break;
12263                 case RTE_FLOW_ACTION_TYPE_SET_META:
12264                         if (flow_dv_convert_action_set_meta
12265                                 (dev, mhdr_res, attr,
12266                                  (const struct rte_flow_action_set_meta *)
12267                                   actions->conf, error))
12268                                 return -rte_errno;
12269                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12270                         break;
12271                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12272                         if (flow_dv_convert_action_set_tag
12273                                 (dev, mhdr_res,
12274                                  (const struct rte_flow_action_set_tag *)
12275                                   actions->conf, error))
12276                                 return -rte_errno;
12277                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12278                         break;
12279                 case RTE_FLOW_ACTION_TYPE_DROP:
12280                         action_flags |= MLX5_FLOW_ACTION_DROP;
12281                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12282                         break;
12283                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12284                         queue = actions->conf;
12285                         rss_desc->queue_num = 1;
12286                         rss_desc->queue[0] = queue->index;
12287                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12288                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12289                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12290                         num_of_dest++;
12291                         break;
12292                 case RTE_FLOW_ACTION_TYPE_RSS:
12293                         rss = actions->conf;
12294                         memcpy(rss_desc->queue, rss->queue,
12295                                rss->queue_num * sizeof(uint16_t));
12296                         rss_desc->queue_num = rss->queue_num;
12297                         /* NULL RSS key indicates default RSS key. */
12298                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12299                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12300                         /*
12301                          * rss->level and rss.types should be set in advance
12302                          * when expanding items for RSS.
12303                          */
12304                         action_flags |= MLX5_FLOW_ACTION_RSS;
12305                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12306                                 MLX5_FLOW_FATE_SHARED_RSS :
12307                                 MLX5_FLOW_FATE_QUEUE;
12308                         break;
12309                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12310                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12311                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12312                         __atomic_fetch_add(&age_act->refcnt, 1,
12313                                            __ATOMIC_RELAXED);
12314                         age_act_pos = actions_n++;
12315                         action_flags |= MLX5_FLOW_ACTION_AGE;
12316                         break;
12317                 case RTE_FLOW_ACTION_TYPE_AGE:
12318                         non_shared_age = action->conf;
12319                         age_act_pos = actions_n++;
12320                         action_flags |= MLX5_FLOW_ACTION_AGE;
12321                         break;
12322                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12323                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12324                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12325                                                              NULL);
12326                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12327                                            __ATOMIC_RELAXED);
12328                         /* Save information first, will apply later. */
12329                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12330                         break;
12331                 case RTE_FLOW_ACTION_TYPE_COUNT:
12332                         if (!dev_conf->devx) {
12333                                 return rte_flow_error_set
12334                                               (error, ENOTSUP,
12335                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12336                                                NULL,
12337                                                "count action not supported");
12338                         }
12339                         /* Save information first, will apply later. */
12340                         count = action->conf;
12341                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12342                         break;
12343                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12344                         dev_flow->dv.actions[actions_n++] =
12345                                                 priv->sh->pop_vlan_action;
12346                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12347                         break;
12348                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12349                         if (!(action_flags &
12350                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12351                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12352                         vlan.eth_proto = rte_be_to_cpu_16
12353                              ((((const struct rte_flow_action_of_push_vlan *)
12354                                                    actions->conf)->ethertype));
12355                         found_action = mlx5_flow_find_action
12356                                         (actions + 1,
12357                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12358                         if (found_action)
12359                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12360                         found_action = mlx5_flow_find_action
12361                                         (actions + 1,
12362                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12363                         if (found_action)
12364                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12365                         if (flow_dv_create_action_push_vlan
12366                                             (dev, attr, &vlan, dev_flow, error))
12367                                 return -rte_errno;
12368                         dev_flow->dv.actions[actions_n++] =
12369                                         dev_flow->dv.push_vlan_res->action;
12370                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12371                         break;
12372                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12373                         /* of_vlan_push action handled this action */
12374                         MLX5_ASSERT(action_flags &
12375                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12376                         break;
12377                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12378                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12379                                 break;
12380                         flow_dev_get_vlan_info_from_items(items, &vlan);
12381                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12382                         /* If no VLAN push - this is a modify header action */
12383                         if (flow_dv_convert_action_modify_vlan_vid
12384                                                 (mhdr_res, actions, error))
12385                                 return -rte_errno;
12386                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12387                         break;
12388                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12389                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12390                         if (flow_dv_create_action_l2_encap(dev, actions,
12391                                                            dev_flow,
12392                                                            attr->transfer,
12393                                                            error))
12394                                 return -rte_errno;
12395                         dev_flow->dv.actions[actions_n++] =
12396                                         dev_flow->dv.encap_decap->action;
12397                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12398                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12399                                 sample_act->action_flags |=
12400                                                         MLX5_FLOW_ACTION_ENCAP;
12401                         break;
12402                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12403                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12404                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12405                                                            attr->transfer,
12406                                                            error))
12407                                 return -rte_errno;
12408                         dev_flow->dv.actions[actions_n++] =
12409                                         dev_flow->dv.encap_decap->action;
12410                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12411                         break;
12412                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12413                         /* Handle encap with preceding decap. */
12414                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12415                                 if (flow_dv_create_action_raw_encap
12416                                         (dev, actions, dev_flow, attr, error))
12417                                         return -rte_errno;
12418                                 dev_flow->dv.actions[actions_n++] =
12419                                         dev_flow->dv.encap_decap->action;
12420                         } else {
12421                                 /* Handle encap without preceding decap. */
12422                                 if (flow_dv_create_action_l2_encap
12423                                     (dev, actions, dev_flow, attr->transfer,
12424                                      error))
12425                                         return -rte_errno;
12426                                 dev_flow->dv.actions[actions_n++] =
12427                                         dev_flow->dv.encap_decap->action;
12428                         }
12429                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12430                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12431                                 sample_act->action_flags |=
12432                                                         MLX5_FLOW_ACTION_ENCAP;
12433                         break;
12434                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12435                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12436                                 ;
12437                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12438                                 if (flow_dv_create_action_l2_decap
12439                                     (dev, dev_flow, attr->transfer, error))
12440                                         return -rte_errno;
12441                                 dev_flow->dv.actions[actions_n++] =
12442                                         dev_flow->dv.encap_decap->action;
12443                         }
12444                         /* If decap is followed by encap, handle it at encap. */
12445                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12446                         break;
12447                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12448                         dev_flow->dv.actions[actions_n++] =
12449                                 (void *)(uintptr_t)action->conf;
12450                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12451                         break;
12452                 case RTE_FLOW_ACTION_TYPE_JUMP:
12453                         jump_group = ((const struct rte_flow_action_jump *)
12454                                                         action->conf)->group;
12455                         grp_info.std_tbl_fix = 0;
12456                         if (dev_flow->skip_scale &
12457                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12458                                 grp_info.skip_scale = 1;
12459                         else
12460                                 grp_info.skip_scale = 0;
12461                         ret = mlx5_flow_group_to_table(dev, tunnel,
12462                                                        jump_group,
12463                                                        &table,
12464                                                        &grp_info, error);
12465                         if (ret)
12466                                 return ret;
12467                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12468                                                        attr->transfer,
12469                                                        !!dev_flow->external,
12470                                                        tunnel, jump_group, 0,
12471                                                        0, error);
12472                         if (!tbl)
12473                                 return rte_flow_error_set
12474                                                 (error, errno,
12475                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12476                                                  NULL,
12477                                                  "cannot create jump action.");
12478                         if (flow_dv_jump_tbl_resource_register
12479                             (dev, tbl, dev_flow, error)) {
12480                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12481                                 return rte_flow_error_set
12482                                                 (error, errno,
12483                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12484                                                  NULL,
12485                                                  "cannot create jump action.");
12486                         }
12487                         dev_flow->dv.actions[actions_n++] =
12488                                         dev_flow->dv.jump->action;
12489                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12490                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12491                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12492                         num_of_dest++;
12493                         break;
12494                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12495                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12496                         if (flow_dv_convert_action_modify_mac
12497                                         (mhdr_res, actions, error))
12498                                 return -rte_errno;
12499                         action_flags |= actions->type ==
12500                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12501                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12502                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12503                         break;
12504                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12505                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12506                         if (flow_dv_convert_action_modify_ipv4
12507                                         (mhdr_res, actions, error))
12508                                 return -rte_errno;
12509                         action_flags |= actions->type ==
12510                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12511                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12512                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12513                         break;
12514                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12515                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12516                         if (flow_dv_convert_action_modify_ipv6
12517                                         (mhdr_res, actions, error))
12518                                 return -rte_errno;
12519                         action_flags |= actions->type ==
12520                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12521                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12522                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12523                         break;
12524                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12525                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12526                         if (flow_dv_convert_action_modify_tp
12527                                         (mhdr_res, actions, items,
12528                                          &flow_attr, dev_flow, !!(action_flags &
12529                                          MLX5_FLOW_ACTION_DECAP), error))
12530                                 return -rte_errno;
12531                         action_flags |= actions->type ==
12532                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12533                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12534                                         MLX5_FLOW_ACTION_SET_TP_DST;
12535                         break;
12536                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12537                         if (flow_dv_convert_action_modify_dec_ttl
12538                                         (mhdr_res, items, &flow_attr, dev_flow,
12539                                          !!(action_flags &
12540                                          MLX5_FLOW_ACTION_DECAP), error))
12541                                 return -rte_errno;
12542                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12543                         break;
12544                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12545                         if (flow_dv_convert_action_modify_ttl
12546                                         (mhdr_res, actions, items, &flow_attr,
12547                                          dev_flow, !!(action_flags &
12548                                          MLX5_FLOW_ACTION_DECAP), error))
12549                                 return -rte_errno;
12550                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12551                         break;
12552                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12553                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12554                         if (flow_dv_convert_action_modify_tcp_seq
12555                                         (mhdr_res, actions, error))
12556                                 return -rte_errno;
12557                         action_flags |= actions->type ==
12558                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12559                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12560                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12561                         break;
12562
12563                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12564                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12565                         if (flow_dv_convert_action_modify_tcp_ack
12566                                         (mhdr_res, actions, error))
12567                                 return -rte_errno;
12568                         action_flags |= actions->type ==
12569                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
12570                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
12571                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
12572                         break;
12573                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
12574                         if (flow_dv_convert_action_set_reg
12575                                         (mhdr_res, actions, error))
12576                                 return -rte_errno;
12577                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12578                         break;
12579                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
12580                         if (flow_dv_convert_action_copy_mreg
12581                                         (dev, mhdr_res, actions, error))
12582                                 return -rte_errno;
12583                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12584                         break;
12585                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
12586                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
12587                         dev_flow->handle->fate_action =
12588                                         MLX5_FLOW_FATE_DEFAULT_MISS;
12589                         break;
12590                 case RTE_FLOW_ACTION_TYPE_METER:
12591                         if (!wks->fm)
12592                                 return rte_flow_error_set(error, rte_errno,
12593                                         RTE_FLOW_ERROR_TYPE_ACTION,
12594                                         NULL, "Failed to get meter in flow.");
12595                         /* Set the meter action. */
12596                         dev_flow->dv.actions[actions_n++] =
12597                                 wks->fm->meter_action;
12598                         action_flags |= MLX5_FLOW_ACTION_METER;
12599                         break;
12600                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
12601                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
12602                                                               actions, error))
12603                                 return -rte_errno;
12604                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
12605                         break;
12606                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
12607                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
12608                                                               actions, error))
12609                                 return -rte_errno;
12610                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
12611                         break;
12612                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
12613                         sample_act_pos = actions_n;
12614                         sample = (const struct rte_flow_action_sample *)
12615                                  action->conf;
12616                         actions_n++;
12617                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
12618                         /* Put the encap action into the group if working with port id. */
12619                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
12620                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
12621                                 sample_act->action_flags |=
12622                                                         MLX5_FLOW_ACTION_ENCAP;
12623                         break;
12624                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
12625                         if (flow_dv_convert_action_modify_field
12626                                         (dev, mhdr_res, actions, attr, error))
12627                                 return -rte_errno;
12628                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
12629                         break;
12630                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12631                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12632                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
12633                         if (!ct)
12634                                 return rte_flow_error_set(error, EINVAL,
12635                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12636                                                 NULL,
12637                                                 "Failed to get CT object.");
12638                         if (mlx5_aso_ct_available(priv->sh, ct))
12639                                 return rte_flow_error_set(error, rte_errno,
12640                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12641                                                 NULL,
12642                                                 "CT is unavailable.");
12643                         if (ct->is_original)
12644                                 dev_flow->dv.actions[actions_n] =
12645                                                         ct->dr_action_orig;
12646                         else
12647                                 dev_flow->dv.actions[actions_n] =
12648                                                         ct->dr_action_rply;
12649                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
12650                         flow->ct = owner_idx;
12651                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
12652                         actions_n++;
12653                         action_flags |= MLX5_FLOW_ACTION_CT;
12654                         break;
12655                 case RTE_FLOW_ACTION_TYPE_END:
12656                         actions_end = true;
12657                         if (mhdr_res->actions_num) {
12658                                 /* create modify action if needed. */
12659                                 if (flow_dv_modify_hdr_resource_register
12660                                         (dev, mhdr_res, dev_flow, error))
12661                                         return -rte_errno;
12662                                 dev_flow->dv.actions[modify_action_position] =
12663                                         handle->dvh.modify_hdr->action;
12664                         }
12665                         /*
12666                          * Handle AGE and COUNT action by single HW counter
12667                          * when they are not shared.
12668                          */
12669                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
12670                                 if ((non_shared_age &&
12671                                      count && !count->shared) ||
12672                                     !(priv->sh->flow_hit_aso_en &&
12673                                       (attr->group || attr->transfer))) {
12674                                         /* Creates age by counters. */
12675                                         cnt_act = flow_dv_prepare_counter
12676                                                                 (dev, dev_flow,
12677                                                                  flow, count,
12678                                                                  non_shared_age,
12679                                                                  error);
12680                                         if (!cnt_act)
12681                                                 return -rte_errno;
12682                                         dev_flow->dv.actions[age_act_pos] =
12683                                                                 cnt_act->action;
12684                                         break;
12685                                 }
12686                                 if (!flow->age && non_shared_age) {
12687                                         flow->age = flow_dv_aso_age_alloc
12688                                                                 (dev, error);
12689                                         if (!flow->age)
12690                                                 return -rte_errno;
12691                                         flow_dv_aso_age_params_init
12692                                                     (dev, flow->age,
12693                                                      non_shared_age->context ?
12694                                                      non_shared_age->context :
12695                                                      (void *)(uintptr_t)
12696                                                      (dev_flow->flow_idx),
12697                                                      non_shared_age->timeout);
12698                                 }
12699                                 age_act = flow_aso_age_get_by_idx(dev,
12700                                                                   flow->age);
12701                                 dev_flow->dv.actions[age_act_pos] =
12702                                                              age_act->dr_action;
12703                         }
12704                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
12705                                 /*
12706                                  * Create one count action, to be used
12707                                  * by all sub-flows.
12708                                  */
12709                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
12710                                                                   flow, count,
12711                                                                   NULL, error);
12712                                 if (!cnt_act)
12713                                         return -rte_errno;
12714                                 dev_flow->dv.actions[actions_n++] =
12715                                                                 cnt_act->action;
12716                         }
12717                 default:
12718                         break;
12719                 }
12720                 if (mhdr_res->actions_num &&
12721                     modify_action_position == UINT32_MAX)
12722                         modify_action_position = actions_n++;
12723         }
12724         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
12725                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
12726                 int item_type = items->type;
12727
12728                 if (!mlx5_flow_os_item_supported(item_type))
12729                         return rte_flow_error_set(error, ENOTSUP,
12730                                                   RTE_FLOW_ERROR_TYPE_ITEM,
12731                                                   NULL, "item not supported");
12732                 switch (item_type) {
12733                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
12734                         flow_dv_translate_item_port_id
12735                                 (dev, match_mask, match_value, items, attr);
12736                         last_item = MLX5_FLOW_ITEM_PORT_ID;
12737                         break;
12738                 case RTE_FLOW_ITEM_TYPE_ETH:
12739                         flow_dv_translate_item_eth(match_mask, match_value,
12740                                                    items, tunnel,
12741                                                    dev_flow->dv.group);
12742                         matcher.priority = action_flags &
12743                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
12744                                         !dev_flow->external ?
12745                                         MLX5_PRIORITY_MAP_L3 :
12746                                         MLX5_PRIORITY_MAP_L2;
12747                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
12748                                              MLX5_FLOW_LAYER_OUTER_L2;
12749                         break;
12750                 case RTE_FLOW_ITEM_TYPE_VLAN:
12751                         flow_dv_translate_item_vlan(dev_flow,
12752                                                     match_mask, match_value,
12753                                                     items, tunnel,
12754                                                     dev_flow->dv.group);
12755                         matcher.priority = MLX5_PRIORITY_MAP_L2;
12756                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
12757                                               MLX5_FLOW_LAYER_INNER_VLAN) :
12758                                              (MLX5_FLOW_LAYER_OUTER_L2 |
12759                                               MLX5_FLOW_LAYER_OUTER_VLAN);
12760                         break;
12761                 case RTE_FLOW_ITEM_TYPE_IPV4:
12762                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12763                                                   &item_flags, &tunnel);
12764                         flow_dv_translate_item_ipv4(match_mask, match_value,
12765                                                     items, tunnel,
12766                                                     dev_flow->dv.group);
12767                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12768                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
12769                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
12770                         if (items->mask != NULL &&
12771                             ((const struct rte_flow_item_ipv4 *)
12772                              items->mask)->hdr.next_proto_id) {
12773                                 next_protocol =
12774                                         ((const struct rte_flow_item_ipv4 *)
12775                                          (items->spec))->hdr.next_proto_id;
12776                                 next_protocol &=
12777                                         ((const struct rte_flow_item_ipv4 *)
12778                                          (items->mask))->hdr.next_proto_id;
12779                         } else {
12780                                 /* Reset for inner layer. */
12781                                 next_protocol = 0xff;
12782                         }
12783                         break;
12784                 case RTE_FLOW_ITEM_TYPE_IPV6:
12785                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12786                                                   &item_flags, &tunnel);
12787                         flow_dv_translate_item_ipv6(match_mask, match_value,
12788                                                     items, tunnel,
12789                                                     dev_flow->dv.group);
12790                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12791                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
12792                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
12793                         if (items->mask != NULL &&
12794                             ((const struct rte_flow_item_ipv6 *)
12795                              items->mask)->hdr.proto) {
12796                                 next_protocol =
12797                                         ((const struct rte_flow_item_ipv6 *)
12798                                          items->spec)->hdr.proto;
12799                                 next_protocol &=
12800                                         ((const struct rte_flow_item_ipv6 *)
12801                                          items->mask)->hdr.proto;
12802                         } else {
12803                                 /* Reset for inner layer. */
12804                                 next_protocol = 0xff;
12805                         }
12806                         break;
12807                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
12808                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
12809                                                              match_value,
12810                                                              items, tunnel);
12811                         last_item = tunnel ?
12812                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
12813                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
12814                         if (items->mask != NULL &&
12815                             ((const struct rte_flow_item_ipv6_frag_ext *)
12816                              items->mask)->hdr.next_header) {
12817                                 next_protocol =
12818                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12819                                  items->spec)->hdr.next_header;
12820                                 next_protocol &=
12821                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12822                                  items->mask)->hdr.next_header;
12823                         } else {
12824                                 /* Reset for inner layer. */
12825                                 next_protocol = 0xff;
12826                         }
12827                         break;
12828                 case RTE_FLOW_ITEM_TYPE_TCP:
12829                         flow_dv_translate_item_tcp(match_mask, match_value,
12830                                                    items, tunnel);
12831                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12832                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
12833                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
12834                         break;
12835                 case RTE_FLOW_ITEM_TYPE_UDP:
12836                         flow_dv_translate_item_udp(match_mask, match_value,
12837                                                    items, tunnel);
12838                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12839                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
12840                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
12841                         break;
12842                 case RTE_FLOW_ITEM_TYPE_GRE:
12843                         flow_dv_translate_item_gre(match_mask, match_value,
12844                                                    items, tunnel);
12845                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12846                         last_item = MLX5_FLOW_LAYER_GRE;
12847                         break;
12848                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
12849                         flow_dv_translate_item_gre_key(match_mask,
12850                                                        match_value, items);
12851                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
12852                         break;
12853                 case RTE_FLOW_ITEM_TYPE_NVGRE:
12854                         flow_dv_translate_item_nvgre(match_mask, match_value,
12855                                                      items, tunnel);
12856                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12857                         last_item = MLX5_FLOW_LAYER_GRE;
12858                         break;
12859                 case RTE_FLOW_ITEM_TYPE_VXLAN:
12860                         flow_dv_translate_item_vxlan(match_mask, match_value,
12861                                                      items, tunnel);
12862                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12863                         last_item = MLX5_FLOW_LAYER_VXLAN;
12864                         break;
12865                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
12866                         flow_dv_translate_item_vxlan_gpe(match_mask,
12867                                                          match_value, items,
12868                                                          tunnel);
12869                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12870                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
12871                         break;
12872                 case RTE_FLOW_ITEM_TYPE_GENEVE:
12873                         flow_dv_translate_item_geneve(match_mask, match_value,
12874                                                       items, tunnel);
12875                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12876                         last_item = MLX5_FLOW_LAYER_GENEVE;
12877                         break;
12878                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
12879                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
12880                                                           match_value,
12881                                                           items, error);
12882                         if (ret)
12883                                 return rte_flow_error_set(error, -ret,
12884                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12885                                         "cannot create GENEVE TLV option");
12886                         flow->geneve_tlv_option = 1;
12887                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
12888                         break;
12889                 case RTE_FLOW_ITEM_TYPE_MPLS:
12890                         flow_dv_translate_item_mpls(match_mask, match_value,
12891                                                     items, last_item, tunnel);
12892                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12893                         last_item = MLX5_FLOW_LAYER_MPLS;
12894                         break;
12895                 case RTE_FLOW_ITEM_TYPE_MARK:
12896                         flow_dv_translate_item_mark(dev, match_mask,
12897                                                     match_value, items);
12898                         last_item = MLX5_FLOW_ITEM_MARK;
12899                         break;
12900                 case RTE_FLOW_ITEM_TYPE_META:
12901                         flow_dv_translate_item_meta(dev, match_mask,
12902                                                     match_value, attr, items);
12903                         last_item = MLX5_FLOW_ITEM_METADATA;
12904                         break;
12905                 case RTE_FLOW_ITEM_TYPE_ICMP:
12906                         flow_dv_translate_item_icmp(match_mask, match_value,
12907                                                     items, tunnel);
12908                         last_item = MLX5_FLOW_LAYER_ICMP;
12909                         break;
12910                 case RTE_FLOW_ITEM_TYPE_ICMP6:
12911                         flow_dv_translate_item_icmp6(match_mask, match_value,
12912                                                       items, tunnel);
12913                         last_item = MLX5_FLOW_LAYER_ICMP6;
12914                         break;
12915                 case RTE_FLOW_ITEM_TYPE_TAG:
12916                         flow_dv_translate_item_tag(dev, match_mask,
12917                                                    match_value, items);
12918                         last_item = MLX5_FLOW_ITEM_TAG;
12919                         break;
12920                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
12921                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
12922                                                         match_value, items);
12923                         last_item = MLX5_FLOW_ITEM_TAG;
12924                         break;
12925                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
12926                         flow_dv_translate_item_tx_queue(dev, match_mask,
12927                                                         match_value,
12928                                                         items);
12929                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
12930                         break;
12931                 case RTE_FLOW_ITEM_TYPE_GTP:
12932                         flow_dv_translate_item_gtp(match_mask, match_value,
12933                                                    items, tunnel);
12934                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12935                         last_item = MLX5_FLOW_LAYER_GTP;
12936                         break;
12937                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
12938                         ret = flow_dv_translate_item_gtp_psc(match_mask,
12939                                                           match_value,
12940                                                           items);
12941                         if (ret)
12942                                 return rte_flow_error_set(error, -ret,
12943                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12944                                         "cannot create GTP PSC item");
12945                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
12946                         break;
12947                 case RTE_FLOW_ITEM_TYPE_ECPRI:
12948                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
12949                                 /* Create it only the first time to be used. */
12950                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
12951                                 if (ret)
12952                                         return rte_flow_error_set
12953                                                 (error, -ret,
12954                                                 RTE_FLOW_ERROR_TYPE_ITEM,
12955                                                 NULL,
12956                                                 "cannot create eCPRI parser");
12957                         }
12958                         /* Adjust the length matcher and device flow value. */
12959                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
12960                         dev_flow->dv.value.size =
12961                                         MLX5_ST_SZ_BYTES(fte_match_param);
12962                         flow_dv_translate_item_ecpri(dev, match_mask,
12963                                                      match_value, items);
12964                         /* No other protocol should follow eCPRI layer. */
12965                         last_item = MLX5_FLOW_LAYER_ECPRI;
12966                         break;
12967                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
12968                         flow_dv_translate_item_integrity(match_mask,
12969                                                          match_value,
12970                                                          head_item, items);
12971                         break;
12972                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
12973                         flow_dv_translate_item_aso_ct(dev, match_mask,
12974                                                       match_value, items);
12975                         break;
12976                 default:
12977                         break;
12978                 }
12979                 item_flags |= last_item;
12980         }
12981         /*
12982          * When E-Switch mode is enabled, we have two cases where we need to
12983          * set the source port manually.
12984          * The first one is the case of a NIC steering rule, and the second is
12985          * an E-Switch rule where no port_id item was found. In both cases
12986          * the source port is set according to the current port in use.
12987          */
12988         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
12989             (priv->representor || priv->master)) {
12990                 if (flow_dv_translate_item_port_id(dev, match_mask,
12991                                                    match_value, NULL, attr))
12992                         return -rte_errno;
12993         }
12994 #ifdef RTE_LIBRTE_MLX5_DEBUG
12995         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
12996                                               dev_flow->dv.value.buf));
12997 #endif
12998         /*
12999          * Layers may be already initialized from prefix flow if this dev_flow
13000          * is the suffix flow.
13001          */
13002         handle->layers |= item_flags;
13003         if (action_flags & MLX5_FLOW_ACTION_RSS)
13004                 flow_dv_hashfields_set(dev_flow, rss_desc);
13005         /* If there is an RSS action in the sample action, the Sample/Mirror
13006          * resource should be registered after the hash field is updated.
13007          */
13008         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13009                 ret = flow_dv_translate_action_sample(dev,
13010                                                       sample,
13011                                                       dev_flow, attr,
13012                                                       &num_of_dest,
13013                                                       sample_actions,
13014                                                       &sample_res,
13015                                                       error);
13016                 if (ret < 0)
13017                         return ret;
13018                 ret = flow_dv_create_action_sample(dev,
13019                                                    dev_flow,
13020                                                    num_of_dest,
13021                                                    &sample_res,
13022                                                    &mdest_res,
13023                                                    sample_actions,
13024                                                    action_flags,
13025                                                    error);
13026                 if (ret < 0)
13027                         return rte_flow_error_set
13028                                                 (error, rte_errno,
13029                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13030                                                 NULL,
13031                                                 "cannot create sample action");
13032                 if (num_of_dest > 1) {
13033                         dev_flow->dv.actions[sample_act_pos] =
13034                         dev_flow->dv.dest_array_res->action;
13035                 } else {
13036                         dev_flow->dv.actions[sample_act_pos] =
13037                         dev_flow->dv.sample_res->verbs_action;
13038                 }
13039         }
13040         /*
13041          * For multiple destinations (sample action with ratio=1), the encap
13042          * action and port id action will be combined into a group action.
13043          * So we need to remove these original actions from the flow and
13044          * only use the sample action instead.
13045          */
13046         if (num_of_dest > 1 &&
13047             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13048                 int i;
13049                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13050
13051                 for (i = 0; i < actions_n; i++) {
13052                         if ((sample_act->dr_encap_action &&
13053                                 sample_act->dr_encap_action ==
13054                                 dev_flow->dv.actions[i]) ||
13055                                 (sample_act->dr_port_id_action &&
13056                                 sample_act->dr_port_id_action ==
13057                                 dev_flow->dv.actions[i]) ||
13058                                 (sample_act->dr_jump_action &&
13059                                 sample_act->dr_jump_action ==
13060                                 dev_flow->dv.actions[i]))
13061                                 continue;
13062                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13063                 }
13064                 memcpy((void *)dev_flow->dv.actions,
13065                                 (void *)temp_actions,
13066                                 tmp_actions_n * sizeof(void *));
13067                 actions_n = tmp_actions_n;
13068         }
13069         dev_flow->dv.actions_n = actions_n;
13070         dev_flow->act_flags = action_flags;
13071         if (wks->skip_matcher_reg)
13072                 return 0;
13073         /* Register matcher. */
13074         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13075                                     matcher.mask.size);
13076         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13077                                         matcher.priority);
13078         /* reserved field no needs to be set to 0 here. */
13079         tbl_key.is_fdb = attr->transfer;
13080         tbl_key.is_egress = attr->egress;
13081         tbl_key.level = dev_flow->dv.group;
13082         tbl_key.id = dev_flow->dv.table_id;
13083         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13084                                      tunnel, attr->group, error))
13085                 return -rte_errno;
13086         return 0;
13087 }
13088
13089 /**
13090  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13091  * and tunnel.
13092  *
13093  * @param[in, out] action
13094  *   Shred RSS action holding hash RX queue objects.
13095  * @param[in] hash_fields
13096  *   Defines combination of packet fields to participate in RX hash.
13097  * @param[in] tunnel
13098  *   Tunnel type
13099  * @param[in] hrxq_idx
13100  *   Hash RX queue index to set.
13101  *
13102  * @return
13103  *   0 on success, otherwise negative errno value.
13104  */
13105 static int
13106 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13107                               const uint64_t hash_fields,
13108                               uint32_t hrxq_idx)
13109 {
13110         uint32_t *hrxqs = action->hrxq;
13111
13112         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13113         case MLX5_RSS_HASH_IPV4:
13114                 /* fall-through. */
13115         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13116                 /* fall-through. */
13117         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13118                 hrxqs[0] = hrxq_idx;
13119                 return 0;
13120         case MLX5_RSS_HASH_IPV4_TCP:
13121                 /* fall-through. */
13122         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13123                 /* fall-through. */
13124         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13125                 hrxqs[1] = hrxq_idx;
13126                 return 0;
13127         case MLX5_RSS_HASH_IPV4_UDP:
13128                 /* fall-through. */
13129         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13130                 /* fall-through. */
13131         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13132                 hrxqs[2] = hrxq_idx;
13133                 return 0;
13134         case MLX5_RSS_HASH_IPV6:
13135                 /* fall-through. */
13136         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13137                 /* fall-through. */
13138         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13139                 hrxqs[3] = hrxq_idx;
13140                 return 0;
13141         case MLX5_RSS_HASH_IPV6_TCP:
13142                 /* fall-through. */
13143         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13144                 /* fall-through. */
13145         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13146                 hrxqs[4] = hrxq_idx;
13147                 return 0;
13148         case MLX5_RSS_HASH_IPV6_UDP:
13149                 /* fall-through. */
13150         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13151                 /* fall-through. */
13152         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13153                 hrxqs[5] = hrxq_idx;
13154                 return 0;
13155         case MLX5_RSS_HASH_NONE:
13156                 hrxqs[6] = hrxq_idx;
13157                 return 0;
13158         default:
13159                 return -1;
13160         }
13161 }
13162
13163 /**
13164  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13165  * and tunnel.
13166  *
13167  * @param[in] dev
13168  *   Pointer to the Ethernet device structure.
13169  * @param[in] idx
13170  *   Shared RSS action ID holding hash RX queue objects.
13171  * @param[in] hash_fields
13172  *   Defines combination of packet fields to participate in RX hash.
13173  * @param[in] tunnel
13174  *   Tunnel type
13175  *
13176  * @return
13177  *   Valid hash RX queue index, otherwise 0.
13178  */
13179 static uint32_t
13180 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13181                                  const uint64_t hash_fields)
13182 {
13183         struct mlx5_priv *priv = dev->data->dev_private;
13184         struct mlx5_shared_action_rss *shared_rss =
13185             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13186         const uint32_t *hrxqs = shared_rss->hrxq;
13187
13188         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13189         case MLX5_RSS_HASH_IPV4:
13190                 /* fall-through. */
13191         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13192                 /* fall-through. */
13193         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13194                 return hrxqs[0];
13195         case MLX5_RSS_HASH_IPV4_TCP:
13196                 /* fall-through. */
13197         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13198                 /* fall-through. */
13199         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13200                 return hrxqs[1];
13201         case MLX5_RSS_HASH_IPV4_UDP:
13202                 /* fall-through. */
13203         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13204                 /* fall-through. */
13205         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13206                 return hrxqs[2];
13207         case MLX5_RSS_HASH_IPV6:
13208                 /* fall-through. */
13209         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13210                 /* fall-through. */
13211         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13212                 return hrxqs[3];
13213         case MLX5_RSS_HASH_IPV6_TCP:
13214                 /* fall-through. */
13215         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13216                 /* fall-through. */
13217         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13218                 return hrxqs[4];
13219         case MLX5_RSS_HASH_IPV6_UDP:
13220                 /* fall-through. */
13221         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13222                 /* fall-through. */
13223         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13224                 return hrxqs[5];
13225         case MLX5_RSS_HASH_NONE:
13226                 return hrxqs[6];
13227         default:
13228                 return 0;
13229         }
13230
13231 }
13232
/**
 * Apply the flow to the NIC, lock free,
 * (mutex should be acquired by caller).
 *
 * Walks every device flow prepared in the per-thread workspace, appends
 * the fate action (drop / queue / shared RSS / default miss) to the
 * already-translated action list and creates the HW flow rule. On any
 * failure all hash RX queue references taken so far are rolled back.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
              struct rte_flow_error *error)
{
        struct mlx5_flow_dv_workspace *dv;
        struct mlx5_flow_handle *dh;
        struct mlx5_flow_handle_dv *dv_h;
        struct mlx5_flow *dev_flow;
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        int n;
        int err;
        int idx;
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
        struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

        MLX5_ASSERT(wks);
        /* Iterate the workspace device flows from the last prepared one. */
        for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
                dev_flow = &wks->flows[idx];
                dv = &dev_flow->dv;
                dh = dev_flow->handle;
                dv_h = &dh->dvh;
                /* The fate action is appended after the translated actions. */
                n = dv->actions_n;
                if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
                        if (dv->transfer) {
                                MLX5_ASSERT(priv->sh->dr_drop_action);
                                dv->actions[n++] = priv->sh->dr_drop_action;
                        } else {
#ifdef HAVE_MLX5DV_DR
                                /* DR supports drop action placeholder. */
                                MLX5_ASSERT(priv->sh->dr_drop_action);
                                dv->actions[n++] = priv->sh->dr_drop_action;
#else
                                /* For DV we use the explicit drop queue. */
                                MLX5_ASSERT(priv->drop_queue.hrxq);
                                dv->actions[n++] =
                                                priv->drop_queue.hrxq->action;
#endif
                        }
                } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
                           !dv_h->rix_sample && !dv_h->rix_dest_array)) {
                        /*
                         * Plain queue fate (not owned by a sample/dest-array
                         * resource): acquire a hash RX queue reference.
                         */
                        struct mlx5_hrxq *hrxq;
                        uint32_t hrxq_idx;

                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
                                                    &hrxq_idx);
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get hash queue");
                                goto error;
                        }
                        /* Remember the reference for release on destroy. */
                        dh->rix_hrxq = hrxq_idx;
                        dv->actions[n++] = hrxq->action;
                } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
                        struct mlx5_hrxq *hrxq = NULL;
                        uint32_t hrxq_idx;

                        /* Shared RSS pre-creates queues - look one up. */
                        hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
                                                rss_desc->shared_rss,
                                                dev_flow->hash_fields);
                        if (hrxq_idx)
                                hrxq = mlx5_ipool_get
                                        (priv->sh->ipool[MLX5_IPOOL_HRXQ],
                                         hrxq_idx);
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get hash queue");
                                goto error;
                        }
                        dh->rix_srss = rss_desc->shared_rss;
                        dv->actions[n++] = hrxq->action;
                } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
                        if (!priv->sh->default_miss_action) {
                                rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "default miss action not be created.");
                                goto error;
                        }
                        dv->actions[n++] = priv->sh->default_miss_action;
                }
                /* Create the HW rule from the matcher value and actions. */
                err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
                                               (void *)&dv->value, n,
                                               dv->actions, &dh->drv_flow);
                if (err) {
                        /*
                         * NOTE(review): 'errno' (not rte_errno) is reported
                         * here - presumably set by mlx5_flow_os_create_flow;
                         * confirm against its implementation.
                         */
                        rte_flow_error_set(error, errno,
                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                           NULL,
                                           "hardware refuses to create flow");
                        goto error;
                }
                if (priv->vmwa_context &&
                    dh->vf_vlan.tag && !dh->vf_vlan.created) {
                        /*
                         * The rule contains the VLAN pattern.
                         * For VF we are going to create VLAN
                         * interface to make hypervisor set correct
                         * e-Switch vport context.
                         */
                        mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
                }
        }
        return 0;
error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        /* Roll back references taken by already-applied handles. */
        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dh, next) {
                /* hrxq is union, don't clear it if the flag is not set. */
                if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
                        mlx5_hrxq_release(dev, dh->rix_hrxq);
                        dh->rix_hrxq = 0;
                } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
                        dh->rix_srss = 0;
                }
                if (dh->vf_vlan.tag && dh->vf_vlan.created)
                        mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
        }
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
}
13371
13372 void
13373 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
13374                           struct mlx5_cache_entry *entry)
13375 {
13376         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
13377                                                           entry);
13378
13379         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
13380         mlx5_free(cache);
13381 }
13382
13383 /**
13384  * Release the flow matcher.
13385  *
13386  * @param dev
13387  *   Pointer to Ethernet device.
13388  * @param port_id
13389  *   Index to port ID action resource.
13390  *
13391  * @return
13392  *   1 while a reference on it exists, 0 when freed.
13393  */
13394 static int
13395 flow_dv_matcher_release(struct rte_eth_dev *dev,
13396                         struct mlx5_flow_handle *handle)
13397 {
13398         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13399         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13400                                                             typeof(*tbl), tbl);
13401         int ret;
13402
13403         MLX5_ASSERT(matcher->matcher_object);
13404         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
13405         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13406         return ret;
13407 }
13408
13409 /**
13410  * Release encap_decap resource.
13411  *
13412  * @param list
13413  *   Pointer to the hash list.
13414  * @param entry
13415  *   Pointer to exist resource entry object.
13416  */
13417 void
13418 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
13419                               struct mlx5_hlist_entry *entry)
13420 {
13421         struct mlx5_dev_ctx_shared *sh = list->ctx;
13422         struct mlx5_flow_dv_encap_decap_resource *res =
13423                 container_of(entry, typeof(*res), entry);
13424
13425         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13426         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13427 }
13428
13429 /**
13430  * Release an encap/decap resource.
13431  *
13432  * @param dev
13433  *   Pointer to Ethernet device.
13434  * @param encap_decap_idx
13435  *   Index of encap decap resource.
13436  *
13437  * @return
13438  *   1 while a reference on it exists, 0 when freed.
13439  */
13440 static int
13441 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13442                                      uint32_t encap_decap_idx)
13443 {
13444         struct mlx5_priv *priv = dev->data->dev_private;
13445         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
13446
13447         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13448                                         encap_decap_idx);
13449         if (!cache_resource)
13450                 return 0;
13451         MLX5_ASSERT(cache_resource->action);
13452         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
13453                                      &cache_resource->entry);
13454 }
13455
13456 /**
13457  * Release an jump to table action resource.
13458  *
13459  * @param dev
13460  *   Pointer to Ethernet device.
13461  * @param rix_jump
13462  *   Index to the jump action resource.
13463  *
13464  * @return
13465  *   1 while a reference on it exists, 0 when freed.
13466  */
13467 static int
13468 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13469                                   uint32_t rix_jump)
13470 {
13471         struct mlx5_priv *priv = dev->data->dev_private;
13472         struct mlx5_flow_tbl_data_entry *tbl_data;
13473
13474         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13475                                   rix_jump);
13476         if (!tbl_data)
13477                 return 0;
13478         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13479 }
13480
13481 void
13482 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
13483                          struct mlx5_hlist_entry *entry)
13484 {
13485         struct mlx5_flow_dv_modify_hdr_resource *res =
13486                 container_of(entry, typeof(*res), entry);
13487
13488         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13489         mlx5_free(entry);
13490 }
13491
13492 /**
13493  * Release a modify-header resource.
13494  *
13495  * @param dev
13496  *   Pointer to Ethernet device.
13497  * @param handle
13498  *   Pointer to mlx5_flow_handle.
13499  *
13500  * @return
13501  *   1 while a reference on it exists, 0 when freed.
13502  */
13503 static int
13504 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13505                                     struct mlx5_flow_handle *handle)
13506 {
13507         struct mlx5_priv *priv = dev->data->dev_private;
13508         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13509
13510         MLX5_ASSERT(entry->action);
13511         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13512 }
13513
13514 void
13515 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
13516                           struct mlx5_cache_entry *entry)
13517 {
13518         struct mlx5_dev_ctx_shared *sh = list->ctx;
13519         struct mlx5_flow_dv_port_id_action_resource *cache =
13520                         container_of(entry, typeof(*cache), entry);
13521
13522         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13523         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
13524 }
13525
13526 /**
13527  * Release port ID action resource.
13528  *
13529  * @param dev
13530  *   Pointer to Ethernet device.
13531  * @param handle
13532  *   Pointer to mlx5_flow_handle.
13533  *
13534  * @return
13535  *   1 while a reference on it exists, 0 when freed.
13536  */
13537 static int
13538 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
13539                                         uint32_t port_id)
13540 {
13541         struct mlx5_priv *priv = dev->data->dev_private;
13542         struct mlx5_flow_dv_port_id_action_resource *cache;
13543
13544         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
13545         if (!cache)
13546                 return 0;
13547         MLX5_ASSERT(cache->action);
13548         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
13549                                      &cache->entry);
13550 }
13551
13552 /**
13553  * Release shared RSS action resource.
13554  *
13555  * @param dev
13556  *   Pointer to Ethernet device.
13557  * @param srss
13558  *   Shared RSS action index.
13559  */
13560 static void
13561 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
13562 {
13563         struct mlx5_priv *priv = dev->data->dev_private;
13564         struct mlx5_shared_action_rss *shared_rss;
13565
13566         shared_rss = mlx5_ipool_get
13567                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
13568         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13569 }
13570
13571 void
13572 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
13573                             struct mlx5_cache_entry *entry)
13574 {
13575         struct mlx5_dev_ctx_shared *sh = list->ctx;
13576         struct mlx5_flow_dv_push_vlan_action_resource *cache =
13577                         container_of(entry, typeof(*cache), entry);
13578
13579         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13580         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
13581 }
13582
13583 /**
13584  * Release push vlan action resource.
13585  *
13586  * @param dev
13587  *   Pointer to Ethernet device.
13588  * @param handle
13589  *   Pointer to mlx5_flow_handle.
13590  *
13591  * @return
13592  *   1 while a reference on it exists, 0 when freed.
13593  */
13594 static int
13595 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
13596                                           struct mlx5_flow_handle *handle)
13597 {
13598         struct mlx5_priv *priv = dev->data->dev_private;
13599         struct mlx5_flow_dv_push_vlan_action_resource *cache;
13600         uint32_t idx = handle->dvh.rix_push_vlan;
13601
13602         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
13603         if (!cache)
13604                 return 0;
13605         MLX5_ASSERT(cache->action);
13606         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
13607                                      &cache->entry);
13608 }
13609
13610 /**
13611  * Release the fate resource.
13612  *
13613  * @param dev
13614  *   Pointer to Ethernet device.
13615  * @param handle
13616  *   Pointer to mlx5_flow_handle.
13617  */
13618 static void
13619 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
13620                                struct mlx5_flow_handle *handle)
13621 {
13622         if (!handle->rix_fate)
13623                 return;
13624         switch (handle->fate_action) {
13625         case MLX5_FLOW_FATE_QUEUE:
13626                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
13627                         mlx5_hrxq_release(dev, handle->rix_hrxq);
13628                 break;
13629         case MLX5_FLOW_FATE_JUMP:
13630                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
13631                 break;
13632         case MLX5_FLOW_FATE_PORT_ID:
13633                 flow_dv_port_id_action_resource_release(dev,
13634                                 handle->rix_port_id_action);
13635                 break;
13636         default:
13637                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
13638                 break;
13639         }
13640         handle->rix_fate = 0;
13641 }
13642
13643 void
13644 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
13645                          struct mlx5_cache_entry *entry)
13646 {
13647         struct mlx5_flow_dv_sample_resource *cache_resource =
13648                         container_of(entry, typeof(*cache_resource), entry);
13649         struct rte_eth_dev *dev = cache_resource->dev;
13650         struct mlx5_priv *priv = dev->data->dev_private;
13651
13652         if (cache_resource->verbs_action)
13653                 claim_zero(mlx5_flow_os_destroy_flow_action
13654                                 (cache_resource->verbs_action));
13655         if (cache_resource->normal_path_tbl)
13656                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13657                         cache_resource->normal_path_tbl);
13658         flow_dv_sample_sub_actions_release(dev,
13659                                 &cache_resource->sample_idx);
13660         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13661                         cache_resource->idx);
13662         DRV_LOG(DEBUG, "sample resource %p: removed",
13663                 (void *)cache_resource);
13664 }
13665
13666 /**
13667  * Release an sample resource.
13668  *
13669  * @param dev
13670  *   Pointer to Ethernet device.
13671  * @param handle
13672  *   Pointer to mlx5_flow_handle.
13673  *
13674  * @return
13675  *   1 while a reference on it exists, 0 when freed.
13676  */
13677 static int
13678 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
13679                                      struct mlx5_flow_handle *handle)
13680 {
13681         struct mlx5_priv *priv = dev->data->dev_private;
13682         struct mlx5_flow_dv_sample_resource *cache_resource;
13683
13684         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13685                          handle->dvh.rix_sample);
13686         if (!cache_resource)
13687                 return 0;
13688         MLX5_ASSERT(cache_resource->verbs_action);
13689         return mlx5_cache_unregister(&priv->sh->sample_action_list,
13690                                      &cache_resource->entry);
13691 }
13692
13693 void
13694 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
13695                              struct mlx5_cache_entry *entry)
13696 {
13697         struct mlx5_flow_dv_dest_array_resource *cache_resource =
13698                         container_of(entry, typeof(*cache_resource), entry);
13699         struct rte_eth_dev *dev = cache_resource->dev;
13700         struct mlx5_priv *priv = dev->data->dev_private;
13701         uint32_t i = 0;
13702
13703         MLX5_ASSERT(cache_resource->action);
13704         if (cache_resource->action)
13705                 claim_zero(mlx5_flow_os_destroy_flow_action
13706                                         (cache_resource->action));
13707         for (; i < cache_resource->num_of_dest; i++)
13708                 flow_dv_sample_sub_actions_release(dev,
13709                                 &cache_resource->sample_idx[i]);
13710         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13711                         cache_resource->idx);
13712         DRV_LOG(DEBUG, "destination array resource %p: removed",
13713                 (void *)cache_resource);
13714 }
13715
13716 /**
13717  * Release an destination array resource.
13718  *
13719  * @param dev
13720  *   Pointer to Ethernet device.
13721  * @param handle
13722  *   Pointer to mlx5_flow_handle.
13723  *
13724  * @return
13725  *   1 while a reference on it exists, 0 when freed.
13726  */
13727 static int
13728 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
13729                                     struct mlx5_flow_handle *handle)
13730 {
13731         struct mlx5_priv *priv = dev->data->dev_private;
13732         struct mlx5_flow_dv_dest_array_resource *cache;
13733
13734         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13735                                handle->dvh.rix_dest_array);
13736         if (!cache)
13737                 return 0;
13738         MLX5_ASSERT(cache->action);
13739         return mlx5_cache_unregister(&priv->sh->dest_array_list,
13740                                      &cache->entry);
13741 }
13742
13743 static void
13744 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
13745 {
13746         struct mlx5_priv *priv = dev->data->dev_private;
13747         struct mlx5_dev_ctx_shared *sh = priv->sh;
13748         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
13749                                 sh->geneve_tlv_option_resource;
13750         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
13751         if (geneve_opt_resource) {
13752                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
13753                                          __ATOMIC_RELAXED))) {
13754                         claim_zero(mlx5_devx_cmd_destroy
13755                                         (geneve_opt_resource->obj));
13756                         mlx5_free(sh->geneve_tlv_option_resource);
13757                         sh->geneve_tlv_option_resource = NULL;
13758                 }
13759         }
13760         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
13761 }
13762
13763 /**
13764  * Remove the flow from the NIC but keeps it in memory.
13765  * Lock free, (mutex should be acquired by caller).
13766  *
13767  * @param[in] dev
13768  *   Pointer to Ethernet device.
13769  * @param[in, out] flow
13770  *   Pointer to flow structure.
13771  */
13772 static void
13773 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
13774 {
13775         struct mlx5_flow_handle *dh;
13776         uint32_t handle_idx;
13777         struct mlx5_priv *priv = dev->data->dev_private;
13778
13779         if (!flow)
13780                 return;
13781         handle_idx = flow->dev_handles;
13782         while (handle_idx) {
13783                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
13784                                     handle_idx);
13785                 if (!dh)
13786                         return;
13787                 if (dh->drv_flow) {
13788                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
13789                         dh->drv_flow = NULL;
13790                 }
13791                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
13792                         flow_dv_fate_resource_release(dev, dh);
13793                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13794                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13795                 handle_idx = dh->next.next;
13796         }
13797 }
13798
13799 /**
13800  * Remove the flow from the NIC and the memory.
13801  * Lock free, (mutex should be acquired by caller).
13802  *
13803  * @param[in] dev
13804  *   Pointer to the Ethernet device structure.
13805  * @param[in, out] flow
13806  *   Pointer to flow structure.
13807  */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_info *fm = NULL;
	/* First shared RSS index seen in the handle list; released last. */
	uint32_t srss = 0;

	if (!flow)
		return;
	/* Detach the flow from HW first (destroys the DR/DV rules). */
	flow_dv_remove(dev, flow);
	if (flow->counter) {
		flow_dv_counter_free(dev, flow->counter);
		flow->counter = 0;
	}
	if (flow->meter) {
		/* Keep fm: the handle loop below frees split flow IDs. */
		fm = flow_dv_meter_find_by_idx(priv, flow->meter);
		if (fm)
			mlx5_flow_meter_detach(priv, fm);
		flow->meter = 0;
	}
	/* Keep the current age handling by default. */
	if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
		flow_dv_aso_ct_release(dev, flow->ct);
	else if (flow->age)
		flow_dv_aso_age_release(dev, flow->age);
	if (flow->geneve_tlv_option) {
		flow_dv_geneve_tlv_option_resource_release(dev);
		flow->geneve_tlv_option = 0;
	}
	/* Walk the singly-linked handle list, releasing each handle. */
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		dev_handle = mlx5_ipool_get(priv->sh->ipool
					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
		if (!dev_handle)
			return;
		flow->dev_handles = dev_handle->next.next;
		if (dev_handle->dvh.matcher)
			flow_dv_matcher_release(dev, dev_handle);
		if (dev_handle->dvh.rix_sample)
			flow_dv_sample_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_dest_array)
			flow_dv_dest_array_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_encap_decap)
			flow_dv_encap_decap_resource_release(dev,
				dev_handle->dvh.rix_encap_decap);
		if (dev_handle->dvh.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_push_vlan)
			flow_dv_push_vlan_action_resource_release(dev,
								  dev_handle);
		if (dev_handle->dvh.rix_tag)
			flow_dv_tag_release(dev,
					    dev_handle->dvh.rix_tag);
		/* Shared RSS fate is deferred until all handles are freed. */
		if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
			flow_dv_fate_resource_release(dev, dev_handle);
		else if (!srss)
			srss = dev_handle->rix_srss;
		if (fm && dev_handle->is_meter_flow_id &&
		    dev_handle->split_flow_id)
			mlx5_ipool_free(fm->flow_ipool,
					dev_handle->split_flow_id);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
			   tmp_idx);
	}
	if (srss)
		flow_dv_shared_rss_action_release(dev, srss);
}
13877
13878 /**
13879  * Release array of hash RX queue objects.
13880  * Helper function.
13881  *
13882  * @param[in] dev
13883  *   Pointer to the Ethernet device structure.
13884  * @param[in, out] hrxqs
13885  *   Array of hash RX queue objects.
13886  *
13887  * @return
13888  *   Total number of references to hash RX queue objects in *hrxqs* array
13889  *   after this operation.
13890  */
13891 static int
13892 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
13893                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
13894 {
13895         size_t i;
13896         int remaining = 0;
13897
13898         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
13899                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
13900
13901                 if (!ret)
13902                         (*hrxqs)[i] = 0;
13903                 remaining += ret;
13904         }
13905         return remaining;
13906 }
13907
13908 /**
13909  * Release all hash RX queue objects representing shared RSS action.
13910  *
13911  * @param[in] dev
13912  *   Pointer to the Ethernet device structure.
13913  * @param[in, out] action
13914  *   Shared RSS action to remove hash RX queue objects from.
13915  *
13916  * @return
13917  *   Total number of references to hash RX queue objects stored in *action*
13918  *   after this operation.
13919  *   Expected to be 0 if no external references held.
13920  */
13921 static int
13922 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
13923                                  struct mlx5_shared_action_rss *shared_rss)
13924 {
13925         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
13926 }
13927
13928 /**
13929  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
13930  * user input.
13931  *
13932  * Only one hash value is available for one L3+L4 combination:
13933  * for example:
13934  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
13935  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
13936  * same slot in mlx5_rss_hash_fields.
13937  *
13938  * @param[in] rss
13939  *   Pointer to the shared action RSS conf.
13940  * @param[in, out] hash_field
13941  *   hash_field variable needed to be adjusted.
13942  *
13943  * @return
13944  *   void
13945  */
13946 static void
13947 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
13948                                      uint64_t *hash_field)
13949 {
13950         uint64_t rss_types = rss->origin.types;
13951
13952         switch (*hash_field & ~IBV_RX_HASH_INNER) {
13953         case MLX5_RSS_HASH_IPV4:
13954                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
13955                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
13956                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13957                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
13958                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13959                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
13960                         else
13961                                 *hash_field |= MLX5_RSS_HASH_IPV4;
13962                 }
13963                 return;
13964         case MLX5_RSS_HASH_IPV6:
13965                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
13966                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
13967                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13968                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
13969                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13970                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
13971                         else
13972                                 *hash_field |= MLX5_RSS_HASH_IPV6;
13973                 }
13974                 return;
13975         case MLX5_RSS_HASH_IPV4_UDP:
13976                 /* fall-through. */
13977         case MLX5_RSS_HASH_IPV6_UDP:
13978                 if (rss_types & ETH_RSS_UDP) {
13979                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
13980                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13981                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
13982                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13983                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
13984                         else
13985                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
13986                 }
13987                 return;
13988         case MLX5_RSS_HASH_IPV4_TCP:
13989                 /* fall-through. */
13990         case MLX5_RSS_HASH_IPV6_TCP:
13991                 if (rss_types & ETH_RSS_TCP) {
13992                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
13993                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13994                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
13995                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13996                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
13997                         else
13998                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
13999                 }
14000                 return;
14001         default:
14002                 return;
14003         }
14004 }
14005
14006 /**
14007  * Setup shared RSS action.
14008  * Prepare set of hash RX queue objects sufficient to handle all valid
14009  * hash_fields combinations (see enum ibv_rx_hash_fields).
14010  *
14011  * @param[in] dev
14012  *   Pointer to the Ethernet device structure.
14013  * @param[in] action_idx
14014  *   Shared RSS action ipool index.
14015  * @param[in, out] action
14016  *   Partially initialized shared RSS action.
14017  * @param[out] error
14018  *   Perform verbose error reporting if not NULL. Initialized in case of
14019  *   error only.
14020  *
14021  * @return
14022  *   0 on success, otherwise negative errno value.
14023  */
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
			   uint32_t action_idx,
			   struct mlx5_shared_action_rss *shared_rss,
			   struct rte_flow_error *error)
{
	struct mlx5_flow_rss_desc rss_desc = { 0 };
	size_t i;
	int err;

	/* Materialize the DevX/Verbs indirection table object first. */
	if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot setup indirection table");
	}
	memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
	rss_desc.const_q = shared_rss->origin.queue;
	rss_desc.queue_num = shared_rss->origin.queue_num;
	/* Set non-zero value to indicate a shared RSS. */
	rss_desc.shared_rss = action_idx;
	rss_desc.ind_tbl = shared_rss->ind_tbl;
	/* Pre-create one hrxq per valid hash-fields combination. */
	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
		uint32_t hrxq_idx;
		uint64_t hash_fields = mlx5_rss_hash_fields[i];
		int tunnel = 0;

		/* Narrow the slot's hash to what the user requested. */
		__flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
		if (shared_rss->origin.level > 1) {
			/* Level > 1 means hashing on the inner headers. */
			hash_fields |= IBV_RX_HASH_INNER;
			tunnel = 1;
		}
		rss_desc.tunnel = tunnel;
		rss_desc.hash_fields = hash_fields;
		hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
		if (!hrxq_idx) {
			rte_flow_error_set
				(error, rte_errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "cannot get hash queue");
			goto error_hrxq_new;
		}
		err = __flow_dv_action_rss_hrxq_set
			(shared_rss, hash_fields, hrxq_idx);
		MLX5_ASSERT(!err);
	}
	return 0;
error_hrxq_new:
	/* Roll back any hrxqs created so far, preserving rte_errno. */
	err = rte_errno;
	__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
	/* On full release, clear the pointer so the caller won't free twice. */
	if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
		shared_rss->ind_tbl = NULL;
	rte_errno = err;
	return -rte_errno;
}
14079
14080 /**
14081  * Create shared RSS action.
14082  *
14083  * @param[in] dev
14084  *   Pointer to the Ethernet device structure.
14085  * @param[in] conf
14086  *   Shared action configuration.
14087  * @param[in] rss
14088  *   RSS action specification used to create shared action.
14089  * @param[out] error
14090  *   Perform verbose error reporting if not NULL. Initialized in case of
14091  *   error only.
14092  *
14093  * @return
14094  *   A valid shared action ID in case of success, 0 otherwise and
14095  *   rte_errno is set.
14096  */
14097 static uint32_t
14098 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14099                             const struct rte_flow_indir_action_conf *conf,
14100                             const struct rte_flow_action_rss *rss,
14101                             struct rte_flow_error *error)
14102 {
14103         struct mlx5_priv *priv = dev->data->dev_private;
14104         struct mlx5_shared_action_rss *shared_rss = NULL;
14105         void *queue = NULL;
14106         struct rte_flow_action_rss *origin;
14107         const uint8_t *rss_key;
14108         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14109         uint32_t idx;
14110
14111         RTE_SET_USED(conf);
14112         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14113                             0, SOCKET_ID_ANY);
14114         shared_rss = mlx5_ipool_zmalloc
14115                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14116         if (!shared_rss || !queue) {
14117                 rte_flow_error_set(error, ENOMEM,
14118                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14119                                    "cannot allocate resource memory");
14120                 goto error_rss_init;
14121         }
14122         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14123                 rte_flow_error_set(error, E2BIG,
14124                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14125                                    "rss action number out of range");
14126                 goto error_rss_init;
14127         }
14128         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14129                                           sizeof(*shared_rss->ind_tbl),
14130                                           0, SOCKET_ID_ANY);
14131         if (!shared_rss->ind_tbl) {
14132                 rte_flow_error_set(error, ENOMEM,
14133                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14134                                    "cannot allocate resource memory");
14135                 goto error_rss_init;
14136         }
14137         memcpy(queue, rss->queue, queue_size);
14138         shared_rss->ind_tbl->queues = queue;
14139         shared_rss->ind_tbl->queues_n = rss->queue_num;
14140         origin = &shared_rss->origin;
14141         origin->func = rss->func;
14142         origin->level = rss->level;
14143         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14144         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14145         /* NULL RSS key indicates default RSS key. */
14146         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14147         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14148         origin->key = &shared_rss->key[0];
14149         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14150         origin->queue = queue;
14151         origin->queue_num = rss->queue_num;
14152         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14153                 goto error_rss_init;
14154         rte_spinlock_init(&shared_rss->action_rss_sl);
14155         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14156         rte_spinlock_lock(&priv->shared_act_sl);
14157         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14158                      &priv->rss_shared_actions, idx, shared_rss, next);
14159         rte_spinlock_unlock(&priv->shared_act_sl);
14160         return idx;
14161 error_rss_init:
14162         if (shared_rss) {
14163                 if (shared_rss->ind_tbl)
14164                         mlx5_free(shared_rss->ind_tbl);
14165                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14166                                 idx);
14167         }
14168         if (queue)
14169                 mlx5_free(queue);
14170         return 0;
14171 }
14172
14173 /**
14174  * Destroy the shared RSS action.
14175  * Release related hash RX queue objects.
14176  *
14177  * @param[in] dev
14178  *   Pointer to the Ethernet device structure.
14179  * @param[in] idx
14180  *   The shared RSS action object ID to be removed.
14181  * @param[out] error
14182  *   Perform verbose error reporting if not NULL. Initialized in case of
14183  *   error only.
14184  *
14185  * @return
14186  *   0 on success, otherwise negative errno value.
14187  */
static int
__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	/* Expected refcnt value when only the creation reference is held. */
	uint32_t old_refcnt = 1;
	int remaining;
	uint16_t *queue = NULL;

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action");
	/* Release the pre-created hash RX queues; fail if any are in use. */
	remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
	if (remaining)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss hrxq has references");
	/*
	 * Atomically drop the creation reference (1 -> 0); the CAS fails
	 * when flows still reference this action.
	 */
	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
					 0, 0, __ATOMIC_ACQUIRE,
					 __ATOMIC_RELAXED))
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss has references");
	/* Save the queue array: it is owned here, not by the ind. table. */
	queue = shared_rss->ind_tbl->queues;
	remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
	if (remaining)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss indirection table has"
					  " references");
	mlx5_free(queue);
	/* Unlink from the per-port list under the shared-action lock. */
	rte_spinlock_lock(&priv->shared_act_sl);
	ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		     &priv->rss_shared_actions, idx, shared_rss, next);
	rte_spinlock_unlock(&priv->shared_act_sl);
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			idx);
	return 0;
}
14233
14234 /**
14235  * Create indirect action, lock free,
14236  * (mutex should be acquired by caller).
14237  * Dispatcher for action type specific call.
14238  *
14239  * @param[in] dev
14240  *   Pointer to the Ethernet device structure.
14241  * @param[in] conf
14242  *   Shared action configuration.
14243  * @param[in] action
14244  *   Action specification used to create indirect action.
14245  * @param[out] error
14246  *   Perform verbose error reporting if not NULL. Initialized in case of
14247  *   error only.
14248  *
14249  * @return
14250  *   A valid shared action handle in case of success, NULL otherwise and
14251  *   rte_errno is set.
14252  */
14253 static struct rte_flow_action_handle *
14254 flow_dv_action_create(struct rte_eth_dev *dev,
14255                       const struct rte_flow_indir_action_conf *conf,
14256                       const struct rte_flow_action *action,
14257                       struct rte_flow_error *err)
14258 {
14259         struct mlx5_priv *priv = dev->data->dev_private;
14260         uint32_t age_idx = 0;
14261         uint32_t idx = 0;
14262         uint32_t ret = 0;
14263
14264         switch (action->type) {
14265         case RTE_FLOW_ACTION_TYPE_RSS:
14266                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14267                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14268                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14269                 break;
14270         case RTE_FLOW_ACTION_TYPE_AGE:
14271                 age_idx = flow_dv_aso_age_alloc(dev, err);
14272                 if (!age_idx) {
14273                         ret = -rte_errno;
14274                         break;
14275                 }
14276                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14277                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14278                 flow_dv_aso_age_params_init(dev, age_idx,
14279                                         ((const struct rte_flow_action_age *)
14280                                                 action->conf)->context ?
14281                                         ((const struct rte_flow_action_age *)
14282                                                 action->conf)->context :
14283                                         (void *)(uintptr_t)idx,
14284                                         ((const struct rte_flow_action_age *)
14285                                                 action->conf)->timeout);
14286                 ret = age_idx;
14287                 break;
14288         case RTE_FLOW_ACTION_TYPE_COUNT:
14289                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14290                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14291                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14292                 break;
14293         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14294                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14295                                                          err);
14296                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14297                 break;
14298         default:
14299                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14300                                    NULL, "action type not supported");
14301                 break;
14302         }
14303         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14304 }
14305
14306 /**
14307  * Destroy the indirect action.
14308  * Release action related resources on the NIC and the memory.
14309  * Lock free, (mutex should be acquired by caller).
14310  * Dispatcher for action type specific call.
14311  *
14312  * @param[in] dev
14313  *   Pointer to the Ethernet device structure.
14314  * @param[in] handle
14315  *   The indirect action object handle to be removed.
14316  * @param[out] error
14317  *   Perform verbose error reporting if not NULL. Initialized in case of
14318  *   error only.
14319  *
14320  * @return
14321  *   0 on success, otherwise negative errno value.
14322  */
static int
flow_dv_action_destroy(struct rte_eth_dev *dev,
		       struct rte_flow_action_handle *handle,
		       struct rte_flow_error *error)
{
	/* The handle packs the action type above the object index bits. */
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	struct mlx5_flow_counter *cnt;
	/* A counter not referenced by any flow has refcnt == 1. */
	uint32_t no_flow_refcnt = 1;
	int ret;

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_RSS:
		return __flow_dv_action_rss_release(dev, idx, error);
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
		/* Only free when no flow holds a reference (CAS on 1). */
		if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
						 &no_flow_refcnt, 1, false,
						 __ATOMIC_ACQUIRE,
						 __ATOMIC_RELAXED))
			return rte_flow_error_set(error, EBUSY,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Indirect count action has references");
		flow_dv_counter_free(dev, idx);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		ret = flow_dv_aso_age_release(dev, idx);
		if (ret)
			/*
			 * In this case, the last flow has a reference will
			 * actually release the age action.
			 */
			DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
				" released with references %d.", idx, ret);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		ret = flow_dv_aso_ct_release(dev, idx);
		if (ret < 0)
			return ret;
		/* A positive count means flows still reference the object. */
		if (ret > 0)
			DRV_LOG(DEBUG, "Connection tracking object %u still "
				"has references %d.", idx, ret);
		return 0;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "action type not supported");
	}
}
14375
14376 /**
14377  * Updates in place shared RSS action configuration.
14378  *
14379  * @param[in] dev
14380  *   Pointer to the Ethernet device structure.
14381  * @param[in] idx
14382  *   The shared RSS action object ID to be updated.
14383  * @param[in] action_conf
14384  *   RSS action specification used to modify *shared_rss*.
14385  * @param[out] error
14386  *   Perform verbose error reporting if not NULL. Initialized in case of
14387  *   error only.
14388  *
14389  * @return
14390  *   0 on success, otherwise negative errno value.
14391  * @note: currently only support update of RSS queues.
14392  */
static int
__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
			    const struct rte_flow_action_rss *action_conf,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	int ret = 0;
	void *queue = NULL;
	uint16_t *queue_old = NULL;
	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action to update");
	/* In-place modify requires DevX support; Verbs cannot do it. */
	if (priv->obj_ops.ind_table_modify == NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot modify indirection table");
	/* Build the new queue list before taking the action lock. */
	queue = mlx5_malloc(MLX5_MEM_ZERO,
			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	if (!queue)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
	memcpy(queue, action_conf->queue, queue_size);
	MLX5_ASSERT(shared_rss->ind_tbl);
	rte_spinlock_lock(&shared_rss->action_rss_sl);
	queue_old = shared_rss->ind_tbl->queues;
	ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
					queue, action_conf->queue_num, true);
	if (ret) {
		/* HW update failed: free the new list, keep the old one. */
		mlx5_free(queue);
		ret = rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot update indirection table");
	} else {
		/* HW updated: the old list is no longer referenced. */
		mlx5_free(queue_old);
		shared_rss->origin.queue = queue;
		shared_rss->origin.queue_num = action_conf->queue_num;
	}
	rte_spinlock_unlock(&shared_rss->action_rss_sl);
	return ret;
}
14441
14442 /*
14443  * Updates in place conntrack context or direction.
14444  * Context update should be synchronized.
14445  *
14446  * @param[in] dev
14447  *   Pointer to the Ethernet device structure.
14448  * @param[in] idx
14449  *   The conntrack object ID to be updated.
14450  * @param[in] update
14451  *   Pointer to the structure of information to update.
14452  * @param[out] error
14453  *   Perform verbose error reporting if not NULL. Initialized in case of
14454  *   error only.
14455  *
14456  * @return
14457  *   0 on success, otherwise negative errno value.
14458  */
static int
__flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
			   const struct rte_flow_modify_conntrack *update,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;
	const struct rte_flow_action_conntrack *new_prf;
	int ret = 0;
	/* CT handles embed the creating port; only the owner may update. */
	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
	uint32_t dev_idx;

	if (PORT_ID(priv) != owner)
		return rte_flow_error_set(error, EACCES,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "CT object owned by another port");
	dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
	ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
	/* A zero refcnt means the object was already destroyed. */
	if (!ct->refcnt)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "CT object is inactive");
	new_prf = &update->new_ct;
	if (update->direction)
		ct->is_original = !!new_prf->is_original_dir;
	if (update->state) {
		/* Only validate the profile when it needs to be updated. */
		ret = mlx5_validate_action_ct(dev, new_prf, error);
		if (ret)
			return ret;
		/* Push the new context to HW via an ASO WQE. */
		ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
		if (ret)
			return rte_flow_error_set(error, EIO,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"Failed to send CT context update WQE");
		/* Block until ready or a failure. */
		ret = mlx5_aso_ct_available(priv->sh, ct);
		if (ret)
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "Timeout to get the CT update");
	}
	return ret;
}
14507
14508 /**
14509  * Updates in place shared action configuration, lock free,
14510  * (mutex should be acquired by caller).
14511  *
14512  * @param[in] dev
14513  *   Pointer to the Ethernet device structure.
14514  * @param[in] handle
14515  *   The indirect action object handle to be updated.
14516  * @param[in] update
14517  *   Action specification used to modify the action pointed by *handle*.
14518  *   *update* could be of same type with the action pointed by the *handle*
14519  *   handle argument, or some other structures like a wrapper, depending on
14520  *   the indirect action type.
14521  * @param[out] error
14522  *   Perform verbose error reporting if not NULL. Initialized in case of
14523  *   error only.
14524  *
14525  * @return
14526  *   0 on success, otherwise negative errno value.
14527  */
14528 static int
14529 flow_dv_action_update(struct rte_eth_dev *dev,
14530                         struct rte_flow_action_handle *handle,
14531                         const void *update,
14532                         struct rte_flow_error *err)
14533 {
14534         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14535         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14536         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14537         const void *action_conf;
14538
14539         switch (type) {
14540         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14541                 action_conf = ((const struct rte_flow_action *)update)->conf;
14542                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14543         case MLX5_INDIRECT_ACTION_TYPE_CT:
14544                 return __flow_dv_action_ct_update(dev, idx, update, err);
14545         default:
14546                 return rte_flow_error_set(err, ENOTSUP,
14547                                           RTE_FLOW_ERROR_TYPE_ACTION,
14548                                           NULL,
14549                                           "action type update not supported");
14550         }
14551 }
14552
14553 /**
14554  * Destroy the meter sub policy table rules.
14555  * Lock free, (mutex should be acquired by caller).
14556  *
14557  * @param[in] dev
14558  *   Pointer to Ethernet device.
14559  * @param[in] sub_policy
14560  *   Pointer to meter sub policy table.
14561  */
14562 static void
14563 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
14564                              struct mlx5_flow_meter_sub_policy *sub_policy)
14565 {
14566         struct mlx5_flow_tbl_data_entry *tbl;
14567         int i;
14568
14569         for (i = 0; i < RTE_COLORS; i++) {
14570                 if (sub_policy->color_rule[i]) {
14571                         claim_zero(mlx5_flow_os_destroy_flow
14572                                 (sub_policy->color_rule[i]));
14573                         sub_policy->color_rule[i] = NULL;
14574                 }
14575                 if (sub_policy->color_matcher[i]) {
14576                         tbl = container_of(sub_policy->color_matcher[i]->tbl,
14577                                 typeof(*tbl), tbl);
14578                         mlx5_cache_unregister(&tbl->matchers,
14579                                       &sub_policy->color_matcher[i]->entry);
14580                         sub_policy->color_matcher[i] = NULL;
14581                 }
14582         }
14583         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14584                 if (sub_policy->rix_hrxq[i]) {
14585                         mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
14586                         sub_policy->rix_hrxq[i] = 0;
14587                 }
14588                 if (sub_policy->jump_tbl[i]) {
14589                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14590                         sub_policy->jump_tbl[i]);
14591                         sub_policy->jump_tbl[i] = NULL;
14592                 }
14593         }
14594         if (sub_policy->tbl_rsc) {
14595                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14596                         sub_policy->tbl_rsc);
14597                 sub_policy->tbl_rsc = NULL;
14598         }
14599 }
14600
14601 /**
14602  * Destroy policy rules, lock free,
14603  * (mutex should be acquired by caller).
14604  * Dispatcher for action type specific call.
14605  *
14606  * @param[in] dev
14607  *   Pointer to the Ethernet device structure.
14608  * @param[in] mtr_policy
14609  *   Meter policy struct.
14610  */
14611 static void
14612 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
14613                       struct mlx5_flow_meter_policy *mtr_policy)
14614 {
14615         uint32_t i, j;
14616         struct mlx5_flow_meter_sub_policy *sub_policy;
14617         uint16_t sub_policy_num;
14618
14619         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14620                 sub_policy_num = (mtr_policy->sub_policy_num >>
14621                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14622                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14623                 for (j = 0; j < sub_policy_num; j++) {
14624                         sub_policy = mtr_policy->sub_policys[i][j];
14625                         if (sub_policy)
14626                                 __flow_dv_destroy_sub_policy_rules
14627                                                 (dev, sub_policy);
14628                 }
14629         }
14630 }
14631
14632 /**
14633  * Destroy policy action, lock free,
14634  * (mutex should be acquired by caller).
14635  * Dispatcher for action type specific call.
14636  *
14637  * @param[in] dev
14638  *   Pointer to the Ethernet device structure.
14639  * @param[in] mtr_policy
14640  *   Meter policy struct.
14641  */
14642 static void
14643 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
14644                       struct mlx5_flow_meter_policy *mtr_policy)
14645 {
14646         struct rte_flow_action *rss_action;
14647         struct mlx5_flow_handle dev_handle;
14648         uint32_t i, j;
14649
14650         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14651                 if (mtr_policy->act_cnt[i].rix_mark) {
14652                         flow_dv_tag_release(dev,
14653                                 mtr_policy->act_cnt[i].rix_mark);
14654                         mtr_policy->act_cnt[i].rix_mark = 0;
14655                 }
14656                 if (mtr_policy->act_cnt[i].modify_hdr) {
14657                         dev_handle.dvh.modify_hdr =
14658                                 mtr_policy->act_cnt[i].modify_hdr;
14659                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
14660                 }
14661                 switch (mtr_policy->act_cnt[i].fate_action) {
14662                 case MLX5_FLOW_FATE_SHARED_RSS:
14663                         rss_action = mtr_policy->act_cnt[i].rss;
14664                         mlx5_free(rss_action);
14665                         break;
14666                 case MLX5_FLOW_FATE_PORT_ID:
14667                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
14668                                 flow_dv_port_id_action_resource_release(dev,
14669                                 mtr_policy->act_cnt[i].rix_port_id_action);
14670                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
14671                         }
14672                         break;
14673                 case MLX5_FLOW_FATE_DROP:
14674                 case MLX5_FLOW_FATE_JUMP:
14675                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14676                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
14677                                                 NULL;
14678                         break;
14679                 default:
14680                         /*Queue action do nothing*/
14681                         break;
14682                 }
14683         }
14684         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14685                 mtr_policy->dr_drop_action[j] = NULL;
14686 }
14687
/**
 * Create policy action per domain, lock free,
 * (mutex should be acquired by caller).
 * Dispatcher for action type specific call.
 *
 * Walks the per-color action lists and, for each supported action type
 * (MARK, SET_TAG, DROP, QUEUE, RSS, PORT_ID, JUMP), registers the backing
 * DV resources and records them in mtr_policy->act_cnt[] for the later
 * translate stage.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Meter policy struct.
 * @param[in] actions
 *   Per-color action lists used to create meter actions.
 * @param[in] domain
 *   Meter domain (ingress/egress/transfer) to create the actions for.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
__flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy,
			const struct rte_flow_action *actions[RTE_COLORS],
			enum mlx5_meter_domain domain,
			struct rte_mtr_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_error flow_err;
	const struct rte_flow_action *act;
	/* NOTE(review): accumulated below but never consumed in this body. */
	uint64_t action_flags = 0;
	/* Dummy flow/handle used only to drive the resource registrars. */
	struct mlx5_flow_handle dh;
	struct mlx5_flow dev_flow;
	struct mlx5_flow_dv_port_id_action_resource port_id_action;
	int i, ret;
	uint8_t egress, transfer;
	struct mlx5_meter_policy_action_container *act_cnt = NULL;
	/* Stack buffer large enough for a full modify-header resource. */
	union {
		struct mlx5_flow_dv_modify_hdr_resource res;
		uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
			    sizeof(struct mlx5_modification_cmd) *
			    (MLX5_MAX_MODIFY_NUM + 1)];
	} mhdr_dummy;

	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	memset(&dh, 0, sizeof(struct mlx5_flow_handle));
	memset(&dev_flow, 0, sizeof(struct mlx5_flow));
	memset(&port_id_action, 0,
		sizeof(struct mlx5_flow_dv_port_id_action_resource));
	dev_flow.handle = &dh;
	dev_flow.dv.port_id_action = &port_id_action;
	dev_flow.external = true;
	for (i = 0; i < RTE_COLORS; i++) {
		/* act_cnt slots only exist for green/yellow (not red). */
		if (i < MLX5_MTR_RTE_COLORS)
			act_cnt = &mtr_policy->act_cnt[i];
		for (act = actions[i];
			act && act->type != RTE_FLOW_ACTION_TYPE_END;
			act++) {
			switch (act->type) {
			/* MARK: register a tag resource holding the mark id. */
			case RTE_FLOW_ACTION_TYPE_MARK:
			{
				uint32_t tag_be = mlx5_flow_mark_set
					(((const struct rte_flow_action_mark *)
					(act->conf))->id);

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "mark action for this color");
				dev_flow.handle->mark = 1;
				if (flow_dv_tag_resource_register(dev, tag_be,
						  &dev_flow, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL,
					"cannot setup policy mark action");
				MLX5_ASSERT(dev_flow.dv.tag_resource);
				act_cnt->rix_mark =
					dev_flow.handle->dvh.rix_tag;
				action_flags |= MLX5_FLOW_ACTION_MARK;
				break;
			}
			/* SET_TAG: build and register a modify-header action. */
			case RTE_FLOW_ACTION_TYPE_SET_TAG:
			{
				struct mlx5_flow_dv_modify_hdr_resource
					*mhdr_res = &mhdr_dummy.res;

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "set tag action for this color");
				memset(mhdr_res, 0, sizeof(*mhdr_res));
				/* Table type follows the meter domain. */
				mhdr_res->ft_type = transfer ?
					MLX5DV_FLOW_TABLE_TYPE_FDB :
					egress ?
					MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
					MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
				if (flow_dv_convert_action_set_tag
				(dev, mhdr_res,
				(const struct rte_flow_action_set_tag *)
				act->conf,  &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot convert policy "
					"set tag action");
				if (!mhdr_res->actions_num)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot find policy "
					"set tag action");
				/* create modify action if needed. */
				dev_flow.dv.group = 1;
				if (flow_dv_modify_hdr_resource_register
					(dev, mhdr_res, &dev_flow, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot register policy "
					"set tag action");
				act_cnt->modify_hdr =
				dev_flow.handle->dvh.modify_hdr;
				action_flags |= MLX5_FLOW_ACTION_SET_TAG;
				break;
			}
			/* DROP: lazily create the shared meter drop table. */
			case RTE_FLOW_ACTION_TYPE_DROP:
			{
				struct mlx5_flow_mtr_mng *mtrmng =
						priv->sh->mtrmng;
				struct mlx5_flow_tbl_data_entry *tbl_data;

				/*
				 * Create the drop table with
				 * METER DROP level.
				 */
				if (!mtrmng->drop_tbl[domain]) {
					mtrmng->drop_tbl[domain] =
					flow_dv_tbl_resource_get(dev,
					MLX5_FLOW_TABLE_LEVEL_METER,
					egress, transfer, false, NULL, 0,
					0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
					if (!mtrmng->drop_tbl[domain])
						return -rte_mtr_error_set
					(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL,
					"Failed to create meter drop table");
				}
				tbl_data = container_of
				(mtrmng->drop_tbl[domain],
				struct mlx5_flow_tbl_data_entry, tbl);
				if (i < MLX5_MTR_RTE_COLORS) {
					act_cnt->dr_jump_action[domain] =
						tbl_data->jump.action;
					act_cnt->fate_action =
						MLX5_FLOW_FATE_DROP;
				}
				/* RED drop is also recorded on the policy. */
				if (i == RTE_COLOR_RED)
					mtr_policy->dr_drop_action[domain] =
						tbl_data->jump.action;
				action_flags |= MLX5_FLOW_ACTION_DROP;
				break;
			}
			/* QUEUE: remember the queue index for translation. */
			case RTE_FLOW_ACTION_TYPE_QUEUE:
			{
				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot create policy "
					"fate queue for this color");
				act_cnt->queue =
				((const struct rte_flow_action_queue *)
					(act->conf))->index;
				act_cnt->fate_action =
					MLX5_FLOW_FATE_QUEUE;
				dev_flow.handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
				mtr_policy->is_queue = 1;
				action_flags |= MLX5_FLOW_ACTION_QUEUE;
				break;
			}
			/* RSS: deep-copy the action via rte_flow_conv(). */
			case RTE_FLOW_ACTION_TYPE_RSS:
			{
				int rss_size;

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "rss action for this color");
				/*
				 * Save RSS conf into policy struct
				 * for translate stage.
				 */
				rss_size = (int)rte_flow_conv
					(RTE_FLOW_CONV_OP_ACTION,
					NULL, 0, act, &flow_err);
				if (rss_size <= 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "Get the wrong "
					  "rss action struct size");
				act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
						rss_size, 0, SOCKET_ID_ANY);
				if (!act_cnt->rss)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "Fail to malloc rss action memory");
				ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
					act_cnt->rss, rss_size,
					act, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "Fail to save "
					  "rss action into policy struct");
				act_cnt->fate_action =
					MLX5_FLOW_FATE_SHARED_RSS;
				action_flags |= MLX5_FLOW_ACTION_RSS;
				break;
			}
			/* PORT_ID: translate and register the port action. */
			case RTE_FLOW_ACTION_TYPE_PORT_ID:
			{
				struct mlx5_flow_dv_port_id_action_resource
					port_id_resource;
				uint32_t port_id = 0;

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot create policy "
					"port action for this color");
				memset(&port_id_resource, 0,
					sizeof(port_id_resource));
				if (flow_dv_translate_action_port_id(dev, act,
						&port_id, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot translate "
					"policy port action");
				port_id_resource.port_id = port_id;
				if (flow_dv_port_id_action_resource_register
					(dev, &port_id_resource,
					&dev_flow, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot setup "
					"policy port action");
				act_cnt->rix_port_id_action =
					dev_flow.handle->rix_port_id_action;
				act_cnt->fate_action =
					MLX5_FLOW_FATE_PORT_ID;
				action_flags |= MLX5_FLOW_ACTION_PORT_ID;
				break;
			}
			/* JUMP: resolve group to table and take a jump ref. */
			case RTE_FLOW_ACTION_TYPE_JUMP:
			{
				uint32_t jump_group = 0;
				uint32_t table = 0;
				struct mlx5_flow_tbl_data_entry *tbl_data;
				struct flow_grp_info grp_info = {
					.external = !!dev_flow.external,
					.transfer = !!transfer,
					.fdb_def_rule = !!priv->fdb_def_rule,
					.std_tbl_fix = 0,
					.skip_scale = dev_flow.skip_scale &
					(1 << MLX5_SCALE_FLOW_GROUP_BIT),
				};
				/* Jump table is kept on sub-policy [0]. */
				struct mlx5_flow_meter_sub_policy *sub_policy =
				mtr_policy->sub_policys[domain][0];

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "jump action for this color");
				jump_group =
				((const struct rte_flow_action_jump *)
							act->conf)->group;
				if (mlx5_flow_group_to_table(dev, NULL,
						       jump_group,
						       &table,
						       &grp_info, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot setup "
					"policy jump action");
				sub_policy->jump_tbl[i] =
				flow_dv_tbl_resource_get(dev,
					table, egress,
					transfer,
					!!dev_flow.external,
					NULL, jump_group, 0,
					0, &flow_err);
				if
				(!sub_policy->jump_tbl[i])
					return  -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot create jump action.");
				tbl_data = container_of
				(sub_policy->jump_tbl[i],
				struct mlx5_flow_tbl_data_entry, tbl);
				act_cnt->dr_jump_action[domain] =
					tbl_data->jump.action;
				act_cnt->fate_action =
					MLX5_FLOW_FATE_JUMP;
				action_flags |= MLX5_FLOW_ACTION_JUMP;
				break;
			}
			default:
				return -rte_mtr_error_set(error, ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "action type not supported");
			}
		}
	}
	return 0;
}
15027
15028 /**
15029  * Create policy action per domain, lock free,
15030  * (mutex should be acquired by caller).
15031  * Dispatcher for action type specific call.
15032  *
15033  * @param[in] dev
15034  *   Pointer to the Ethernet device structure.
15035  * @param[in] mtr_policy
15036  *   Meter policy struct.
15037  * @param[in] action
15038  *   Action specification used to create meter actions.
15039  * @param[out] error
15040  *   Perform verbose error reporting if not NULL. Initialized in case of
15041  *   error only.
15042  *
15043  * @return
15044  *   0 on success, otherwise negative errno value.
15045  */
15046 static int
15047 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15048                       struct mlx5_flow_meter_policy *mtr_policy,
15049                       const struct rte_flow_action *actions[RTE_COLORS],
15050                       struct rte_mtr_error *error)
15051 {
15052         int ret, i;
15053         uint16_t sub_policy_num;
15054
15055         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15056                 sub_policy_num = (mtr_policy->sub_policy_num >>
15057                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15058                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15059                 if (sub_policy_num) {
15060                         ret = __flow_dv_create_domain_policy_acts(dev,
15061                                 mtr_policy, actions,
15062                                 (enum mlx5_meter_domain)i, error);
15063                         if (ret)
15064                                 return ret;
15065                 }
15066         }
15067         return 0;
15068 }
15069
15070 /**
15071  * Query a DV flow rule for its statistics via DevX.
15072  *
15073  * @param[in] dev
15074  *   Pointer to Ethernet device.
15075  * @param[in] cnt_idx
15076  *   Index to the flow counter.
15077  * @param[out] data
15078  *   Data retrieved by the query.
15079  * @param[out] error
15080  *   Perform verbose error reporting if not NULL.
15081  *
15082  * @return
15083  *   0 on success, a negative errno value otherwise and rte_errno is set.
15084  */
15085 static int
15086 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15087                     struct rte_flow_error *error)
15088 {
15089         struct mlx5_priv *priv = dev->data->dev_private;
15090         struct rte_flow_query_count *qc = data;
15091
15092         if (!priv->config.devx)
15093                 return rte_flow_error_set(error, ENOTSUP,
15094                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15095                                           NULL,
15096                                           "counters are not supported");
15097         if (cnt_idx) {
15098                 uint64_t pkts, bytes;
15099                 struct mlx5_flow_counter *cnt;
15100                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15101
15102                 if (err)
15103                         return rte_flow_error_set(error, -err,
15104                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15105                                         NULL, "cannot read counters");
15106                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15107                 qc->hits_set = 1;
15108                 qc->bytes_set = 1;
15109                 qc->hits = pkts - cnt->hits;
15110                 qc->bytes = bytes - cnt->bytes;
15111                 if (qc->reset) {
15112                         cnt->hits = pkts;
15113                         cnt->bytes = bytes;
15114                 }
15115                 return 0;
15116         }
15117         return rte_flow_error_set(error, EINVAL,
15118                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15119                                   NULL,
15120                                   "counters are not available");
15121 }
15122
/**
 * Query an indirect (shared) action.
 *
 * The action type and per-type index are both encoded in @p handle
 * (type in the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET, index below).
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] handle
 *   Indirect action handle encoding type and index.
 * @param[out] data
 *   Data retrieved by the query; the concrete struct depends on the type
 *   (rte_flow_query_age, rte_flow_query_count or rte_flow_action_conntrack).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_action_query(struct rte_eth_dev *dev,
                     const struct rte_flow_action_handle *handle, void *data,
                     struct rte_flow_error *error)
{
        struct mlx5_age_param *age_param;
        struct rte_flow_query_age *resp;
        uint32_t act_idx = (uint32_t)(uintptr_t)handle;
        uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
        uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_aso_ct_action *ct;
        uint16_t owner;
        uint32_t dev_idx;

        switch (type) {
        case MLX5_INDIRECT_ACTION_TYPE_AGE:
                age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
                resp = data;
                resp->aged = __atomic_load_n(&age_param->state,
                                              __ATOMIC_RELAXED) == AGE_TMOUT ?
                                                                          1 : 0;
                /* "Seconds since last hit" is meaningless once aged out. */
                resp->sec_since_last_hit_valid = !resp->aged;
                if (resp->sec_since_last_hit_valid)
                        resp->sec_since_last_hit = __atomic_load_n
                             (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
                return 0;
        case MLX5_INDIRECT_ACTION_TYPE_COUNT:
                return flow_dv_query_count(dev, idx, data, error);
        case MLX5_INDIRECT_ACTION_TYPE_CT:
                /* Only the owning port may query a CT object. */
                owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
                if (owner != PORT_ID(priv))
                        return rte_flow_error_set(error, EACCES,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                        NULL,
                                        "CT object owned by another port");
                dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
                ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
                MLX5_ASSERT(ct);
                if (!ct->refcnt)
                        return rte_flow_error_set(error, EFAULT,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                        NULL,
                                        "CT object is inactive");
                ((struct rte_flow_action_conntrack *)data)->peer_port =
                                                        ct->peer;
                ((struct rte_flow_action_conntrack *)data)->is_original_dir =
                                                        ct->is_original;
                /* Remaining CT state is fetched from HW via an ASO WQE. */
                if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
                        return rte_flow_error_set(error, EIO,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                        NULL,
                                        "Failed to query CT context");
                return 0;
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "action type query not supported");
        }
}
15183
15184 /**
15185  * Query a flow rule AGE action for aging information.
15186  *
15187  * @param[in] dev
15188  *   Pointer to Ethernet device.
15189  * @param[in] flow
15190  *   Pointer to the sub flow.
15191  * @param[out] data
15192  *   data retrieved by the query.
15193  * @param[out] error
15194  *   Perform verbose error reporting if not NULL.
15195  *
15196  * @return
15197  *   0 on success, a negative errno value otherwise and rte_errno is set.
15198  */
15199 static int
15200 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15201                   void *data, struct rte_flow_error *error)
15202 {
15203         struct rte_flow_query_age *resp = data;
15204         struct mlx5_age_param *age_param;
15205
15206         if (flow->age) {
15207                 struct mlx5_aso_age_action *act =
15208                                      flow_aso_age_get_by_idx(dev, flow->age);
15209
15210                 age_param = &act->age_params;
15211         } else if (flow->counter) {
15212                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15213
15214                 if (!age_param || !age_param->timeout)
15215                         return rte_flow_error_set
15216                                         (error, EINVAL,
15217                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15218                                          NULL, "cannot read age data");
15219         } else {
15220                 return rte_flow_error_set(error, EINVAL,
15221                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15222                                           NULL, "age data not available");
15223         }
15224         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15225                                      AGE_TMOUT ? 1 : 0;
15226         resp->sec_since_last_hit_valid = !resp->aged;
15227         if (resp->sec_since_last_hit_valid)
15228                 resp->sec_since_last_hit = __atomic_load_n
15229                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15230         return 0;
15231 }
15232
15233 /**
15234  * Query a flow.
15235  *
15236  * @see rte_flow_query()
15237  * @see rte_flow_ops
15238  */
15239 static int
15240 flow_dv_query(struct rte_eth_dev *dev,
15241               struct rte_flow *flow __rte_unused,
15242               const struct rte_flow_action *actions __rte_unused,
15243               void *data __rte_unused,
15244               struct rte_flow_error *error __rte_unused)
15245 {
15246         int ret = -EINVAL;
15247
15248         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15249                 switch (actions->type) {
15250                 case RTE_FLOW_ACTION_TYPE_VOID:
15251                         break;
15252                 case RTE_FLOW_ACTION_TYPE_COUNT:
15253                         ret = flow_dv_query_count(dev, flow->counter, data,
15254                                                   error);
15255                         break;
15256                 case RTE_FLOW_ACTION_TYPE_AGE:
15257                         ret = flow_dv_query_age(dev, flow, data, error);
15258                         break;
15259                 default:
15260                         return rte_flow_error_set(error, ENOTSUP,
15261                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15262                                                   actions,
15263                                                   "action not supported");
15264                 }
15265         }
15266         return ret;
15267 }
15268
15269 /**
15270  * Destroy the meter table set.
15271  * Lock free, (mutex should be acquired by caller).
15272  *
15273  * @param[in] dev
15274  *   Pointer to Ethernet device.
15275  * @param[in] fm
15276  *   Meter information table.
15277  */
15278 static void
15279 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15280                         struct mlx5_flow_meter_info *fm)
15281 {
15282         struct mlx5_priv *priv = dev->data->dev_private;
15283         int i;
15284
15285         if (!fm || !priv->config.dv_flow_en)
15286                 return;
15287         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15288                 if (fm->drop_rule[i]) {
15289                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15290                         fm->drop_rule[i] = NULL;
15291                 }
15292         }
15293 }
15294
/**
 * Destroy the global meter drop table set.
 *
 * For every meter domain: destroys the default rule, unregisters the
 * default matcher and all per-register-bit drop matchers, then releases
 * the drop table itself. Rules are removed before their matchers and
 * matchers before the table they belong to.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */
static void
flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
        struct mlx5_flow_tbl_data_entry *tbl;
        int i, j;

        for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
                if (mtrmng->def_rule[i]) {
                        claim_zero(mlx5_flow_os_destroy_flow
                                        (mtrmng->def_rule[i]));
                        mtrmng->def_rule[i] = NULL;
                }
                if (mtrmng->def_matcher[i]) {
                        tbl = container_of(mtrmng->def_matcher[i]->tbl,
                                struct mlx5_flow_tbl_data_entry, tbl);
                        mlx5_cache_unregister(&tbl->matchers,
                                      &mtrmng->def_matcher[i]->entry);
                        mtrmng->def_matcher[i] = NULL;
                }
                /* One drop matcher may exist per meter-id register bit. */
                for (j = 0; j < MLX5_REG_BITS; j++) {
                        if (mtrmng->drop_matcher[i][j]) {
                                tbl =
                                container_of(mtrmng->drop_matcher[i][j]->tbl,
                                             struct mlx5_flow_tbl_data_entry,
                                             tbl);
                                mlx5_cache_unregister(&tbl->matchers,
                                        &mtrmng->drop_matcher[i][j]->entry);
                                mtrmng->drop_matcher[i][j] = NULL;
                        }
                }
                if (mtrmng->drop_tbl[i]) {
                        flow_dv_tbl_resource_release(MLX5_SH(dev),
                                mtrmng->drop_tbl[i]);
                        mtrmng->drop_tbl[i] = NULL;
                }
        }
}
15334
15335 /* Number of meter flow actions, count and jump or count and drop. */
15336 #define METER_ACTIONS 2
15337
/**
 * Destroy the default meter policy of one domain.
 *
 * Destroys the sub-policy rules, frees the default policy object and
 * clears its slot in the meter management structure. The caller must
 * ensure mtrmng->def_policy[domain] is non-NULL.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] domain
 *   Meter domain to destroy the default policy for.
 */
static void
__flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
                              enum mlx5_meter_domain domain)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_meter_def_policy *def_policy =
                        priv->sh->mtrmng->def_policy[domain];

        __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
        mlx5_free(def_policy);
        priv->sh->mtrmng->def_policy[domain] = NULL;
}
15350
15351 /**
15352  * Destroy the default policy table set.
15353  *
15354  * @param[in] dev
15355  *   Pointer to Ethernet device.
15356  */
15357 static void
15358 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15359 {
15360         struct mlx5_priv *priv = dev->data->dev_private;
15361         int i;
15362
15363         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15364                 if (priv->sh->mtrmng->def_policy[i])
15365                         __flow_dv_destroy_domain_def_policy(dev,
15366                                         (enum mlx5_meter_domain)i);
15367         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15368 }
15369
/**
 * Create one meter policy flow rule matching a specific color.
 *
 * The rule matches the given color value in the color register (and, for
 * non-default policies on a representor/master port, the source port) and
 * attaches the supplied action list.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] color_reg_c_idx
 *   Index of the REG_C register carrying the meter color.
 * @param[in] color
 *   Color value to match.
 * @param[in] matcher_object
 *   DR matcher object to create the rule on.
 * @param[in] actions_n
 *   Number of actions in @p actions.
 * @param[in] actions
 *   Array of DV actions for the rule.
 * @param[in] is_default_policy
 *   True when creating the default policy (skips port matching).
 * @param[out] rule
 *   Created rule on success.
 * @param[in] attr
 *   Flow attributes, used for port-id translation.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_flow(struct rte_eth_dev *dev,
                        uint32_t color_reg_c_idx,
                        enum rte_color color, void *matcher_object,
                        int actions_n, void *actions,
                        bool is_default_policy, void **rule,
                        const struct rte_flow_attr *attr)
{
        int ret;
        struct mlx5_flow_dv_match_params value = {
                .size = sizeof(value.buf) -
                        MLX5_ST_SZ_BYTES(fte_match_set_misc4),
        };
        struct mlx5_flow_dv_match_params matcher = {
                .size = sizeof(matcher.buf) -
                        MLX5_ST_SZ_BYTES(fte_match_set_misc4),
        };
        struct mlx5_priv *priv = dev->data->dev_private;

        /* E-Switch ports must also match on the originating port id. */
        if (!is_default_policy && (priv->representor || priv->master)) {
                if (flow_dv_translate_item_port_id(dev, matcher.buf,
                                                   value.buf, NULL, attr)) {
                        DRV_LOG(ERR,
                        "Failed to create meter policy flow with port.");
                        return -1;
                }
        }
        flow_dv_match_meta_reg(matcher.buf, value.buf,
                                (enum modify_reg)color_reg_c_idx,
                                rte_col_2_mlx5_col(color),
                                UINT32_MAX);
        ret = mlx5_flow_os_create_flow(matcher_object,
                        (void *)&value, actions_n, actions, rule);
        if (ret) {
                DRV_LOG(ERR, "Failed to create meter policy flow.");
                return -1;
        }
        return 0;
}
15409
/**
 * Register (or reuse) a matcher for one meter policy color.
 *
 * The matcher lives in the sub-policy table and is cached per table;
 * the color index doubles as the matcher priority.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] color_reg_c_idx
 *   Index of the REG_C register carrying the meter color.
 * @param[in] priority
 *   Matcher priority; equals the color index (RTE_COLOR_*).
 * @param[in,out] sub_policy
 *   Sub policy table owning the matcher.
 * @param[in] attr
 *   Flow attributes, used for port-id translation.
 * @param[in] is_default_policy
 *   True when creating the default policy (skips port matching).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
                        uint32_t color_reg_c_idx,
                        uint16_t priority,
                        struct mlx5_flow_meter_sub_policy *sub_policy,
                        const struct rte_flow_attr *attr,
                        bool is_default_policy,
                        struct rte_flow_error *error)
{
        struct mlx5_cache_entry *entry;
        struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
        struct mlx5_flow_dv_matcher matcher = {
                .mask = {
                        .size = sizeof(matcher.mask.buf) -
                                MLX5_ST_SZ_BYTES(fte_match_set_misc4),
                },
                .tbl = tbl_rsc,
        };
        struct mlx5_flow_dv_match_params value = {
                .size = sizeof(value.buf) -
                        MLX5_ST_SZ_BYTES(fte_match_set_misc4),
        };
        struct mlx5_flow_cb_ctx ctx = {
                .error = error,
                .data = &matcher,
        };
        struct mlx5_flow_tbl_data_entry *tbl_data;
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;

        /* E-Switch ports must also match on the originating port id. */
        if (!is_default_policy && (priv->representor || priv->master)) {
                if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
                                                   value.buf, NULL, attr)) {
                        DRV_LOG(ERR,
                        "Failed to register meter drop matcher with port.");
                        return -1;
                }
        }
        tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
        /*
         * Only green/yellow matchers mask the color bits; the RED matcher
         * carries no color mask — presumably acting as the lowest-priority
         * catch-all. NOTE(review): confirm against rule creation logic.
         */
        if (priority < RTE_COLOR_RED)
                flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
                        (enum modify_reg)color_reg_c_idx, 0, color_mask);
        matcher.priority = priority;
        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
                                        matcher.mask.size);
        entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
        if (!entry) {
                DRV_LOG(ERR, "Failed to register meter drop matcher.");
                return -1;
        }
        sub_policy->color_matcher[priority] =
                container_of(entry, struct mlx5_flow_dv_matcher, entry);
        return 0;
}
15464
15465 /**
15466  * Create the policy rules per domain.
15467  *
15468  * @param[in] dev
15469  *   Pointer to Ethernet device.
15470  * @param[in] sub_policy
15471  *    Pointer to sub policy table..
15472  * @param[in] egress
15473  *   Direction of the table.
15474  * @param[in] transfer
15475  *   E-Switch or NIC flow.
15476  * @param[in] acts
15477  *   Pointer to policy action list per color.
15478  *
15479  * @return
15480  *   0 on success, -1 otherwise.
15481  */
15482 static int
15483 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
15484                 struct mlx5_flow_meter_sub_policy *sub_policy,
15485                 uint8_t egress, uint8_t transfer, bool is_default_policy,
15486                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
15487 {
15488         struct rte_flow_error flow_err;
15489         uint32_t color_reg_c_idx;
15490         struct rte_flow_attr attr = {
15491                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
15492                 .priority = 0,
15493                 .ingress = 0,
15494                 .egress = !!egress,
15495                 .transfer = !!transfer,
15496                 .reserved = 0,
15497         };
15498         int i;
15499         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
15500
15501         if (ret < 0)
15502                 return -1;
15503         /* Create policy table with POLICY level. */
15504         if (!sub_policy->tbl_rsc)
15505                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
15506                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
15507                                 egress, transfer, false, NULL, 0, 0,
15508                                 sub_policy->idx, &flow_err);
15509         if (!sub_policy->tbl_rsc) {
15510                 DRV_LOG(ERR,
15511                         "Failed to create meter sub policy table.");
15512                 return -1;
15513         }
15514         /* Prepare matchers. */
15515         color_reg_c_idx = ret;
15516         for (i = 0; i < RTE_COLORS; i++) {
15517                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
15518                         continue;
15519                 attr.priority = i;
15520                 if (!sub_policy->color_matcher[i]) {
15521                         /* Create matchers for Color. */
15522                         if (__flow_dv_create_policy_matcher(dev,
15523                                 color_reg_c_idx, i, sub_policy,
15524                                 &attr, is_default_policy, &flow_err))
15525                                 return -1;
15526                 }
15527                 /* Create flow, matching color. */
15528                 if (acts[i].actions_n)
15529                         if (__flow_dv_create_policy_flow(dev,
15530                                 color_reg_c_idx, (enum rte_color)i,
15531                                 sub_policy->color_matcher[i]->matcher_object,
15532                                 acts[i].actions_n,
15533                                 acts[i].dv_actions,
15534                                 is_default_policy,
15535                                 &sub_policy->color_rule[i],
15536                                 &attr))
15537                                 return -1;
15538         }
15539         return 0;
15540 }
15541
/**
 * Collect the DV actions of a meter policy and create its rules.
 *
 * For green: gathers mark (tag), modify-header and fate (port-id, jump,
 * drop, queue/RSS) actions from the policy's action counters. For red:
 * only the drop action is supported. Yellow is skipped. The assembled
 * per-color action lists are then installed via
 * __flow_dv_create_domain_policy_rules().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Meter policy providing the per-color actions.
 * @param[in] sub_policy
 *   Sub policy table to create the rules in.
 * @param[in] domain
 *   Meter domain (selects direction and jump/drop action set).
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
                        struct mlx5_flow_meter_policy *mtr_policy,
                        struct mlx5_flow_meter_sub_policy *sub_policy,
                        uint32_t domain)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_meter_policy_acts acts[RTE_COLORS];
        struct mlx5_flow_dv_tag_resource *tag;
        struct mlx5_flow_dv_port_id_action_resource *port_action;
        struct mlx5_hrxq *hrxq;
        uint8_t egress, transfer;
        int i;

        for (i = 0; i < RTE_COLORS; i++) {
                acts[i].actions_n = 0;
                if (i == RTE_COLOR_YELLOW)
                        continue;
                if (i == RTE_COLOR_RED) {
                        /* Only support drop on red. */
                        acts[i].dv_actions[0] =
                        mtr_policy->dr_drop_action[domain];
                        acts[i].actions_n = 1;
                        continue;
                }
                /* Mark action is stored in the tag ipool. */
                if (mtr_policy->act_cnt[i].rix_mark) {
                        tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
                                        mtr_policy->act_cnt[i].rix_mark);
                        if (!tag) {
                                DRV_LOG(ERR, "Failed to find "
                                "mark action for policy.");
                                return -1;
                        }
                        acts[i].dv_actions[acts[i].actions_n] =
                                                tag->action;
                        acts[i].actions_n++;
                }
                if (mtr_policy->act_cnt[i].modify_hdr) {
                        acts[i].dv_actions[acts[i].actions_n] =
                        mtr_policy->act_cnt[i].modify_hdr->action;
                        acts[i].actions_n++;
                }
                /* Fate action must come last in the action list. */
                if (mtr_policy->act_cnt[i].fate_action) {
                        switch (mtr_policy->act_cnt[i].fate_action) {
                        case MLX5_FLOW_FATE_PORT_ID:
                                port_action = mlx5_ipool_get
                                        (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
                                mtr_policy->act_cnt[i].rix_port_id_action);
                                if (!port_action) {
                                        DRV_LOG(ERR, "Failed to find "
                                                "port action for policy.");
                                        return -1;
                                }
                                acts[i].dv_actions[acts[i].actions_n] =
                                port_action->action;
                                acts[i].actions_n++;
                                break;
                        case MLX5_FLOW_FATE_DROP:
                        case MLX5_FLOW_FATE_JUMP:
                                acts[i].dv_actions[acts[i].actions_n] =
                                mtr_policy->act_cnt[i].dr_jump_action[domain];
                                acts[i].actions_n++;
                                break;
                        case MLX5_FLOW_FATE_SHARED_RSS:
                        case MLX5_FLOW_FATE_QUEUE:
                                hrxq = mlx5_ipool_get
                                (priv->sh->ipool[MLX5_IPOOL_HRXQ],
                                sub_policy->rix_hrxq[i]);
                                if (!hrxq) {
                                        DRV_LOG(ERR, "Failed to find "
                                                "queue action for policy.");
                                        return -1;
                                }
                                acts[i].dv_actions[acts[i].actions_n] =
                                hrxq->action;
                                acts[i].actions_n++;
                                break;
                        default:
                                /* Other fate kinds: nothing to add here. */
                                break;
                        }
                }
        }
        egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
        transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
        if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
                                egress, transfer, false, acts)) {
                DRV_LOG(ERR,
                "Failed to create policy rules per domain.");
                return -1;
        }
        return 0;
}
15635
15636 /**
15637  * Create the policy rules.
15638  *
15639  * @param[in] dev
15640  *   Pointer to Ethernet device.
15641  * @param[in,out] mtr_policy
15642  *   Pointer to meter policy table.
15643  *
15644  * @return
15645  *   0 on success, -1 otherwise.
15646  */
15647 static int
15648 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
15649                              struct mlx5_flow_meter_policy *mtr_policy)
15650 {
15651         int i;
15652         uint16_t sub_policy_num;
15653
15654         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15655                 sub_policy_num = (mtr_policy->sub_policy_num >>
15656                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15657                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15658                 if (!sub_policy_num)
15659                         continue;
15660                 /* Prepare actions list and create policy rules. */
15661                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
15662                         mtr_policy->sub_policys[i][0], i)) {
15663                         DRV_LOG(ERR,
15664                         "Failed to create policy action list per domain.");
15665                         return -1;
15666                 }
15667         }
15668         return 0;
15669 }
15670
15671 static int
15672 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
15673 {
15674         struct mlx5_priv *priv = dev->data->dev_private;
15675         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15676         struct mlx5_flow_meter_def_policy *def_policy;
15677         struct mlx5_flow_tbl_resource *jump_tbl;
15678         struct mlx5_flow_tbl_data_entry *tbl_data;
15679         uint8_t egress, transfer;
15680         struct rte_flow_error error;
15681         struct mlx5_meter_policy_acts acts[RTE_COLORS];
15682         int ret;
15683
15684         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15685         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15686         def_policy = mtrmng->def_policy[domain];
15687         if (!def_policy) {
15688                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
15689                         sizeof(struct mlx5_flow_meter_def_policy),
15690                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
15691                 if (!def_policy) {
15692                         DRV_LOG(ERR, "Failed to alloc "
15693                                         "default policy table.");
15694                         goto def_policy_error;
15695                 }
15696                 mtrmng->def_policy[domain] = def_policy;
15697                 /* Create the meter suffix table with SUFFIX level. */
15698                 jump_tbl = flow_dv_tbl_resource_get(dev,
15699                                 MLX5_FLOW_TABLE_LEVEL_METER,
15700                                 egress, transfer, false, NULL, 0,
15701                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
15702                 if (!jump_tbl) {
15703                         DRV_LOG(ERR,
15704                                 "Failed to create meter suffix table.");
15705                         goto def_policy_error;
15706                 }
15707                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
15708                 tbl_data = container_of(jump_tbl,
15709                                 struct mlx5_flow_tbl_data_entry, tbl);
15710                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
15711                                                 tbl_data->jump.action;
15712                 acts[RTE_COLOR_GREEN].dv_actions[0] =
15713                                                 tbl_data->jump.action;
15714                 acts[RTE_COLOR_GREEN].actions_n = 1;
15715                 /* Create jump action to the drop table. */
15716                 if (!mtrmng->drop_tbl[domain]) {
15717                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
15718                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
15719                                 egress, transfer, false, NULL, 0,
15720                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
15721                         if (!mtrmng->drop_tbl[domain]) {
15722                                 DRV_LOG(ERR, "Failed to create "
15723                                 "meter drop table for default policy.");
15724                                 goto def_policy_error;
15725                         }
15726                 }
15727                 tbl_data = container_of(mtrmng->drop_tbl[domain],
15728                                 struct mlx5_flow_tbl_data_entry, tbl);
15729                 def_policy->dr_jump_action[RTE_COLOR_RED] =
15730                                                 tbl_data->jump.action;
15731                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
15732                 acts[RTE_COLOR_RED].actions_n = 1;
15733                 /* Create default policy rules. */
15734                 ret = __flow_dv_create_domain_policy_rules(dev,
15735                                         &def_policy->sub_policy,
15736                                         egress, transfer, true, acts);
15737                 if (ret) {
15738                         DRV_LOG(ERR, "Failed to create "
15739                                 "default policy rules.");
15740                                 goto def_policy_error;
15741                 }
15742         }
15743         return 0;
15744 def_policy_error:
15745         __flow_dv_destroy_domain_def_policy(dev,
15746                         (enum mlx5_meter_domain)domain);
15747         return -1;
15748 }
15749
15750 /**
15751  * Create the default policy table set.
15752  *
15753  * @param[in] dev
15754  *   Pointer to Ethernet device.
15755  * @return
15756  *   0 on success, -1 otherwise.
15757  */
15758 static int
15759 flow_dv_create_def_policy(struct rte_eth_dev *dev)
15760 {
15761         struct mlx5_priv *priv = dev->data->dev_private;
15762         int i;
15763
15764         /* Non-termination policy table. */
15765         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15766                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
15767                         continue;
15768                 if (__flow_dv_create_domain_def_policy(dev, i)) {
15769                         DRV_LOG(ERR,
15770                         "Failed to create default policy");
15771                         return -1;
15772                 }
15773         }
15774         return 0;
15775 }
15776
15777 /**
15778  * Create the needed meter tables.
15779  * Lock free, (mutex should be acquired by caller).
15780  *
15781  * @param[in] dev
15782  *   Pointer to Ethernet device.
15783  * @param[in] fm
15784  *   Meter information table.
15785  * @param[in] mtr_idx
15786  *   Meter index.
15787  * @param[in] domain_bitmap
15788  *   Domain bitmap.
15789  * @return
15790  *   0 on success, -1 otherwise.
15791  */
15792 static int
15793 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
15794                         struct mlx5_flow_meter_info *fm,
15795                         uint32_t mtr_idx,
15796                         uint8_t domain_bitmap)
15797 {
15798         struct mlx5_priv *priv = dev->data->dev_private;
15799         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15800         struct rte_flow_error error;
15801         struct mlx5_flow_tbl_data_entry *tbl_data;
15802         uint8_t egress, transfer;
15803         void *actions[METER_ACTIONS];
15804         int domain, ret, i;
15805         struct mlx5_flow_counter *cnt;
15806         struct mlx5_flow_dv_match_params value = {
15807                 .size = sizeof(value.buf) -
15808                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15809         };
15810         struct mlx5_flow_dv_match_params matcher_para = {
15811                 .size = sizeof(matcher_para.buf) -
15812                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15813         };
15814         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
15815                                                      0, &error);
15816         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
15817         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
15818         struct mlx5_cache_entry *entry;
15819         struct mlx5_flow_dv_matcher matcher = {
15820                 .mask = {
15821                         .size = sizeof(matcher.mask.buf) -
15822                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15823                 },
15824         };
15825         struct mlx5_flow_dv_matcher *drop_matcher;
15826         struct mlx5_flow_cb_ctx ctx = {
15827                 .error = &error,
15828                 .data = &matcher,
15829         };
15830
15831         if (!priv->mtr_en || mtr_id_reg_c < 0) {
15832                 rte_errno = ENOTSUP;
15833                 return -1;
15834         }
15835         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
15836                 if (!(domain_bitmap & (1 << domain)) ||
15837                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
15838                         continue;
15839                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15840                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15841                 /* Create the drop table with METER DROP level. */
15842                 if (!mtrmng->drop_tbl[domain]) {
15843                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
15844                                         MLX5_FLOW_TABLE_LEVEL_METER,
15845                                         egress, transfer, false, NULL, 0,
15846                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
15847                         if (!mtrmng->drop_tbl[domain]) {
15848                                 DRV_LOG(ERR, "Failed to create meter drop table.");
15849                                 goto policy_error;
15850                         }
15851                 }
15852                 /* Create default matcher in drop table. */
15853                 matcher.tbl = mtrmng->drop_tbl[domain],
15854                 tbl_data = container_of(mtrmng->drop_tbl[domain],
15855                                 struct mlx5_flow_tbl_data_entry, tbl);
15856                 if (!mtrmng->def_matcher[domain]) {
15857                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15858                                        (enum modify_reg)mtr_id_reg_c,
15859                                        0, 0);
15860                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
15861                         matcher.crc = rte_raw_cksum
15862                                         ((const void *)matcher.mask.buf,
15863                                         matcher.mask.size);
15864                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15865                         if (!entry) {
15866                                 DRV_LOG(ERR, "Failed to register meter "
15867                                 "drop default matcher.");
15868                                 goto policy_error;
15869                         }
15870                         mtrmng->def_matcher[domain] = container_of(entry,
15871                         struct mlx5_flow_dv_matcher, entry);
15872                 }
15873                 /* Create default rule in drop table. */
15874                 if (!mtrmng->def_rule[domain]) {
15875                         i = 0;
15876                         actions[i++] = priv->sh->dr_drop_action;
15877                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15878                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
15879                         ret = mlx5_flow_os_create_flow
15880                                 (mtrmng->def_matcher[domain]->matcher_object,
15881                                 (void *)&value, i, actions,
15882                                 &mtrmng->def_rule[domain]);
15883                         if (ret) {
15884                                 DRV_LOG(ERR, "Failed to create meter "
15885                                 "default drop rule for drop table.");
15886                                 goto policy_error;
15887                         }
15888                 }
15889                 if (!fm->drop_cnt)
15890                         continue;
15891                 MLX5_ASSERT(mtrmng->max_mtr_bits);
15892                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
15893                         /* Create matchers for Drop. */
15894                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15895                                         (enum modify_reg)mtr_id_reg_c, 0,
15896                                         (mtr_id_mask << mtr_id_offset));
15897                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
15898                         matcher.crc = rte_raw_cksum
15899                                         ((const void *)matcher.mask.buf,
15900                                         matcher.mask.size);
15901                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15902                         if (!entry) {
15903                                 DRV_LOG(ERR,
15904                                 "Failed to register meter drop matcher.");
15905                                 goto policy_error;
15906                         }
15907                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
15908                                 container_of(entry, struct mlx5_flow_dv_matcher,
15909                                              entry);
15910                 }
15911                 drop_matcher =
15912                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
15913                 /* Create drop rule, matching meter_id only. */
15914                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15915                                 (enum modify_reg)mtr_id_reg_c,
15916                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
15917                 i = 0;
15918                 cnt = flow_dv_counter_get_by_idx(dev,
15919                                         fm->drop_cnt, NULL);
15920                 actions[i++] = cnt->action;
15921                 actions[i++] = priv->sh->dr_drop_action;
15922                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
15923                                                (void *)&value, i, actions,
15924                                                &fm->drop_rule[domain]);
15925                 if (ret) {
15926                         DRV_LOG(ERR, "Failed to create meter "
15927                                 "drop rule for drop table.");
15928                                 goto policy_error;
15929                 }
15930         }
15931         return 0;
15932 policy_error:
15933         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15934                 if (fm->drop_rule[i]) {
15935                         claim_zero(mlx5_flow_os_destroy_flow
15936                                 (fm->drop_rule[i]));
15937                         fm->drop_rule[i] = NULL;
15938                 }
15939         }
15940         return -1;
15941 }
15942
15943 /**
15944  * Find the policy table for prefix table with RSS.
15945  *
15946  * @param[in] dev
15947  *   Pointer to Ethernet device.
15948  * @param[in] mtr_policy
15949  *   Pointer to meter policy table.
15950  * @param[in] rss_desc
15951  *   Pointer to rss_desc
15952  * @return
15953  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
15954  */
static struct mlx5_flow_meter_sub_policy *
flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_policy *mtr_policy,
		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
	uint32_t sub_policy_idx = 0;
	uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
	uint32_t i, j;
	struct mlx5_hrxq *hrxq;
	struct mlx5_flow_handle dh;
	struct mlx5_meter_policy_action_container *act_cnt;
	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
	uint16_t sub_policy_num;

	/* The whole lookup/create sequence runs under the policy lock. */
	rte_spinlock_lock(&mtr_policy->sl);
	/* Acquire one hash-Rx-queue object per color with an RSS desc. */
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (!rss_desc[i])
			continue;
		hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
		if (!hrxq_idx[i]) {
			rte_spinlock_unlock(&mtr_policy->sl);
			return NULL;
		}
	}
	sub_policy_num = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
	/* Try to reuse an existing sub-policy with identical hrxq set. */
	for (i = 0; i < sub_policy_num;
		i++) {
		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
			if (rss_desc[j] &&
				hrxq_idx[j] !=
			mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
				break;
		}
		if (j >= MLX5_MTR_RTE_COLORS) {
			/*
			 * Found the sub policy table with the same queue
			 * per color; drop the extra hrxq references taken
			 * above before returning the existing table.
			 */
			rte_spinlock_unlock(&mtr_policy->sl);
			for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
				mlx5_hrxq_release(dev, hrxq_idx[j]);
			return mtr_policy->sub_policys[domain][i];
		}
	}
	/* Create sub policy. */
	if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
		/* Reuse the first dummy sub_policy. */
		sub_policy = mtr_policy->sub_policys[domain][0];
		sub_policy_idx = sub_policy->idx;
	} else {
		sub_policy = mlx5_ipool_zmalloc
				(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
				&sub_policy_idx);
		if (!sub_policy ||
			sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
			for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
				mlx5_hrxq_release(dev, hrxq_idx[i]);
			goto rss_sub_policy_error;
		}
		sub_policy->idx = sub_policy_idx;
		sub_policy->main_policy = mtr_policy;
	}
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (!rss_desc[i])
			continue;
		sub_policy->rix_hrxq[i] = hrxq_idx[i];
		/*
		 * Overwrite the last action from
		 * RSS action to Queue action.
		 */
		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      hrxq_idx[i]);
		if (!hrxq) {
			DRV_LOG(ERR, "Failed to create policy hrxq");
			goto rss_sub_policy_error;
		}
		act_cnt = &mtr_policy->act_cnt[i];
		if (act_cnt->rix_mark || act_cnt->modify_hdr) {
			/* Mark/modify fates need Rx queue flags set. */
			memset(&dh, 0, sizeof(struct mlx5_flow_handle));
			if (act_cnt->rix_mark)
				dh.mark = 1;
			dh.fate_action = MLX5_FLOW_FATE_QUEUE;
			dh.rix_hrxq = hrxq_idx[i];
			flow_drv_rxq_flags_set(dev, &dh);
		}
	}
	if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
		sub_policy, domain)) {
		DRV_LOG(ERR, "Failed to create policy "
			"rules per domain.");
		goto rss_sub_policy_error;
	}
	if (sub_policy != mtr_policy->sub_policys[domain][0]) {
		i = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
		/*
		 * NOTE(review): the slot is written before the bound check
		 * below; when i == MLX5_MTR_RSS_MAX_SUB_POLICY this looks
		 * like an out-of-bounds store - confirm the capacity of
		 * sub_policys[domain][].
		 */
		mtr_policy->sub_policys[domain][i] = sub_policy;
		i++;
		if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
			goto rss_sub_policy_error;
		mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
		mtr_policy->sub_policy_num |=
			(i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
	}
	rte_spinlock_unlock(&mtr_policy->sl);
	return sub_policy;
rss_sub_policy_error:
	if (sub_policy) {
		__flow_dv_destroy_sub_policy_rules(dev, sub_policy);
		if (sub_policy != mtr_policy->sub_policys[domain][0]) {
			i = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
			mtr_policy->sub_policys[domain][i] = NULL;
			mlx5_ipool_free
			(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
					sub_policy->idx);
		}
	}
	/*
	 * NOTE(review): for a newly allocated sub-policy, sub_policy->idx
	 * equals sub_policy_idx, so the entry freed just above may be freed
	 * a second time here - verify against mlx5_ipool_free() semantics.
	 */
	if (sub_policy_idx)
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
			sub_policy_idx);
	rte_spinlock_unlock(&mtr_policy->sl);
	return NULL;
}
16086
16087
16088 /**
16089  * Destroy the sub policy table with RX queue.
16090  *
16091  * @param[in] dev
16092  *   Pointer to Ethernet device.
16093  * @param[in] mtr_policy
16094  *   Pointer to meter policy table.
16095  */
static void
flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_policy *mtr_policy)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
	uint32_t i, j;
	uint16_t sub_policy_num, new_policy_num;

	rte_spinlock_lock(&mtr_policy->sl);
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		switch (mtr_policy->act_cnt[i].fate_action) {
		case MLX5_FLOW_FATE_SHARED_RSS:
			/* Count of sub-policies in the ingress domain slot. */
			sub_policy_num = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
			new_policy_num = sub_policy_num;
			for (j = 0; j < sub_policy_num; j++) {
				sub_policy =
					mtr_policy->sub_policys[domain][j];
				if (sub_policy) {
					__flow_dv_destroy_sub_policy_rules(dev,
						sub_policy);
				/*
				 * Only dynamically added sub-policies are
				 * freed; the first (dummy) one is kept.
				 * NOTE(review): the indentation here is
				 * misleading but the braces are balanced -
				 * this "if" nests inside "if (sub_policy)".
				 */
				if (sub_policy !=
					mtr_policy->sub_policys[domain][0]) {
					mtr_policy->sub_policys[domain][j] =
								NULL;
					mlx5_ipool_free
				(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
						sub_policy->idx);
						new_policy_num--;
					}
				}
			}
			if (new_policy_num != sub_policy_num) {
				/* Write the reduced count back to the slot. */
				mtr_policy->sub_policy_num &=
				~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
				(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
				mtr_policy->sub_policy_num |=
				(new_policy_num &
					MLX5_MTR_SUB_POLICY_NUM_MASK) <<
				(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
			}
			break;
		case MLX5_FLOW_FATE_QUEUE:
			/* Queue fate uses only the first sub-policy. */
			sub_policy = mtr_policy->sub_policys[domain][0];
			__flow_dv_destroy_sub_policy_rules(dev,
						sub_policy);
			break;
		default:
			/* Other fate actions hold no Rx queue resources. */
			break;
		}
	}
	rte_spinlock_unlock(&mtr_policy->sl);
}
16153
16154 /**
16155  * Validate the batch counter support in root table.
16156  *
16157  * Create a simple flow with invalid counter and drop action on root table to
16158  * validate if batch counter with offset on root table is supported or not.
16159  *
16160  * @param[in] dev
16161  *   Pointer to rte_eth_dev structure.
16162  *
16163  * @return
16164  *   0 on success, a negative errno value otherwise and rte_errno is set.
16165  */
16166 int
16167 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
16168 {
16169         struct mlx5_priv *priv = dev->data->dev_private;
16170         struct mlx5_dev_ctx_shared *sh = priv->sh;
16171         struct mlx5_flow_dv_match_params mask = {
16172                 .size = sizeof(mask.buf),
16173         };
16174         struct mlx5_flow_dv_match_params value = {
16175                 .size = sizeof(value.buf),
16176         };
16177         struct mlx5dv_flow_matcher_attr dv_attr = {
16178                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
16179                 .priority = 0,
16180                 .match_criteria_enable = 0,
16181                 .match_mask = (void *)&mask,
16182         };
16183         void *actions[2] = { 0 };
16184         struct mlx5_flow_tbl_resource *tbl = NULL;
16185         struct mlx5_devx_obj *dcs = NULL;
16186         void *matcher = NULL;
16187         void *flow = NULL;
16188         int ret = -1;
16189
16190         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
16191                                         0, 0, 0, NULL);
16192         if (!tbl)
16193                 goto err;
16194         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
16195         if (!dcs)
16196                 goto err;
16197         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
16198                                                     &actions[0]);
16199         if (ret)
16200                 goto err;
16201         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
16202         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
16203                                                &matcher);
16204         if (ret)
16205                 goto err;
16206         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
16207                                        actions, &flow);
16208 err:
16209         /*
16210          * If batch counter with offset is not supported, the driver will not
16211          * validate the invalid offset value, flow create should success.
16212          * In this case, it means batch counter is not supported in root table.
16213          *
16214          * Otherwise, if flow create is failed, counter offset is supported.
16215          */
16216         if (flow) {
16217                 DRV_LOG(INFO, "Batch counter is not supported in root "
16218                               "table. Switch to fallback mode.");
16219                 rte_errno = ENOTSUP;
16220                 ret = -rte_errno;
16221                 claim_zero(mlx5_flow_os_destroy_flow(flow));
16222         } else {
16223                 /* Check matcher to make sure validate fail at flow create. */
16224                 if (!matcher || (matcher && errno != EINVAL))
16225                         DRV_LOG(ERR, "Unexpected error in counter offset "
16226                                      "support detection");
16227                 ret = 0;
16228         }
16229         if (actions[0])
16230                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
16231         if (matcher)
16232                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
16233         if (tbl)
16234                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
16235         if (dcs)
16236                 claim_zero(mlx5_devx_cmd_destroy(dcs));
16237         return ret;
16238 }
16239
16240 /**
16241  * Query a devx counter.
16242  *
16243  * @param[in] dev
16244  *   Pointer to the Ethernet device structure.
16245  * @param[in] cnt
16246  *   Index to the flow counter.
16247  * @param[in] clear
16248  *   Set to clear the counter statistics.
16249  * @param[out] pkts
16250  *   The statistics value of packets.
16251  * @param[out] bytes
16252  *   The statistics value of bytes.
16253  *
16254  * @return
16255  *   0 on success, otherwise return -1.
16256  */
16257 static int
16258 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
16259                       uint64_t *pkts, uint64_t *bytes)
16260 {
16261         struct mlx5_priv *priv = dev->data->dev_private;
16262         struct mlx5_flow_counter *cnt;
16263         uint64_t inn_pkts, inn_bytes;
16264         int ret;
16265
16266         if (!priv->config.devx)
16267                 return -1;
16268
16269         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
16270         if (ret)
16271                 return -1;
16272         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
16273         *pkts = inn_pkts - cnt->hits;
16274         *bytes = inn_bytes - cnt->bytes;
16275         if (clear) {
16276                 cnt->hits = inn_pkts;
16277                 cnt->bytes = inn_bytes;
16278         }
16279         return 0;
16280 }
16281
16282 /**
16283  * Get aged-out flows.
16284  *
16285  * @param[in] dev
16286  *   Pointer to the Ethernet device structure.
16287  * @param[in] context
16288  *   The address of an array of pointers to the aged-out flows contexts.
16289  * @param[in] nb_contexts
16290  *   The length of context array pointers.
16291  * @param[out] error
16292  *   Perform verbose error reporting if not NULL. Initialized in case of
16293  *   error only.
16294  *
16295  * @return
16296  *   how many contexts get in success, otherwise negative errno value.
16297  *   if nb_contexts is 0, return the amount of all aged contexts.
16298  *   if nb_contexts is not 0 , return the amount of aged flows reported
16299  *   in the context array.
16300  * @note: only stub for now
16301  */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	struct mlx5_aso_age_action *act;
	int nb_flows = 0;

	/* A non-zero request size with no output array is invalid. */
	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "empty context");
	age_info = GET_PORT_AGE_INFO(priv);
	/* Both aged lists are walked under the same port age lock. */
	rte_spinlock_lock(&age_info->aged_sl);
	/* First the ASO age actions... */
	LIST_FOREACH(act, &age_info->aged_aso, next) {
		nb_flows++;
		if (nb_contexts) {
			context[nb_flows - 1] =
						act->age_params.context;
			if (!(--nb_contexts))
				break;
		}
	}
	/*
	 * ...then counter-based aged flows.
	 * NOTE(review): if the output array filled up in the loop above,
	 * nb_contexts is 0 here, so this loop keeps incrementing nb_flows
	 * without storing - the return value can exceed the number of
	 * contexts actually written. Confirm this matches the documented
	 * contract (nb_contexts == 0 meaning "count all").
	 */
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
	/* Re-arm aging event reporting for this port. */
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}
16343
/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 *
 * Passes 0 as the second argument (presumably the age/aging flag -
 * confirm against flow_dv_counter_alloc()).
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	return flow_dv_counter_alloc(dev, 0);
}
16352
16353 /**
16354  * Validate indirect action.
16355  * Dispatcher for action type specific validation.
16356  *
16357  * @param[in] dev
16358  *   Pointer to the Ethernet device structure.
16359  * @param[in] conf
16360  *   Indirect action configuration.
16361  * @param[in] action
16362  *   The indirect action object to validate.
16363  * @param[out] error
16364  *   Perform verbose error reporting if not NULL. Initialized in case of
16365  *   error only.
16366  *
16367  * @return
16368  *   0 on success, otherwise negative errno value.
16369  */
16370 static int
16371 flow_dv_action_validate(struct rte_eth_dev *dev,
16372                         const struct rte_flow_indir_action_conf *conf,
16373                         const struct rte_flow_action *action,
16374                         struct rte_flow_error *err)
16375 {
16376         struct mlx5_priv *priv = dev->data->dev_private;
16377
16378         RTE_SET_USED(conf);
16379         switch (action->type) {
16380         case RTE_FLOW_ACTION_TYPE_RSS:
16381                 /*
16382                  * priv->obj_ops is set according to driver capabilities.
16383                  * When DevX capabilities are
16384                  * sufficient, it is set to devx_obj_ops.
16385                  * Otherwise, it is set to ibv_obj_ops.
16386                  * ibv_obj_ops doesn't support ind_table_modify operation.
16387                  * In this case the indirect RSS action can't be used.
16388                  */
16389                 if (priv->obj_ops.ind_table_modify == NULL)
16390                         return rte_flow_error_set
16391                                         (err, ENOTSUP,
16392                                          RTE_FLOW_ERROR_TYPE_ACTION,
16393                                          NULL,
16394                                          "Indirect RSS action not supported");
16395                 return mlx5_validate_action_rss(dev, action, err);
16396         case RTE_FLOW_ACTION_TYPE_AGE:
16397                 if (!priv->sh->aso_age_mng)
16398                         return rte_flow_error_set(err, ENOTSUP,
16399                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16400                                                 NULL,
16401                                                 "Indirect age action not supported");
16402                 return flow_dv_validate_action_age(0, action, dev, err);
16403         case RTE_FLOW_ACTION_TYPE_COUNT:
16404                 /*
16405                  * There are two mechanisms to share the action count.
16406                  * The old mechanism uses the shared field to share, while the
16407                  * new mechanism uses the indirect action API.
16408                  * This validation comes to make sure that the two mechanisms
16409                  * are not combined.
16410                  */
16411                 if (is_shared_action_count(action))
16412                         return rte_flow_error_set(err, ENOTSUP,
16413                                                   RTE_FLOW_ERROR_TYPE_ACTION,
16414                                                   NULL,
16415                                                   "Mix shared and indirect counter is not supported");
16416                 return flow_dv_validate_action_count(dev, true, 0, err);
16417         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
16418                 if (!priv->sh->ct_aso_en)
16419                         return rte_flow_error_set(err, ENOTSUP,
16420                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16421                                         "ASO CT is not supported");
16422                 return mlx5_validate_action_ct(dev, action->conf, err);
16423         default:
16424                 return rte_flow_error_set(err, ENOTSUP,
16425                                           RTE_FLOW_ERROR_TYPE_ACTION,
16426                                           NULL,
16427                                           "action type not supported");
16428         }
16429 }
16430
16431 /**
16432  * Validate meter policy actions.
16433  * Dispatcher for action type specific validation.
16434  *
16435  * @param[in] dev
16436  *   Pointer to the Ethernet device structure.
16437  * @param[in] action
16438  *   The meter policy action object to validate.
16439  * @param[in] attr
16440  *   Attributes of flow to determine steering domain.
16441  * @param[out] error
16442  *   Perform verbose error reporting if not NULL. Initialized in case of
16443  *   error only.
16444  *
16445  * @return
16446  *   0 on success, otherwise negative errno value.
16447  */
16448 static int
16449 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
16450                         const struct rte_flow_action *actions[RTE_COLORS],
16451                         struct rte_flow_attr *attr,
16452                         bool *is_rss,
16453                         uint8_t *domain_bitmap,
16454                         bool *is_def_policy,
16455                         struct rte_mtr_error *error)
16456 {
16457         struct mlx5_priv *priv = dev->data->dev_private;
16458         struct mlx5_dev_config *dev_conf = &priv->config;
16459         const struct rte_flow_action *act;
16460         uint64_t action_flags = 0;
16461         int actions_n;
16462         int i, ret;
16463         struct rte_flow_error flow_err;
16464         uint8_t domain_color[RTE_COLORS] = {0};
16465         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
16466
16467         if (!priv->config.dv_esw_en)
16468                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
16469         *domain_bitmap = def_domain;
16470         if (actions[RTE_COLOR_YELLOW] &&
16471                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
16472                 return -rte_mtr_error_set(error, ENOTSUP,
16473                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16474                                 NULL,
16475                                 "Yellow color does not support any action.");
16476         if (actions[RTE_COLOR_YELLOW] &&
16477                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_DROP)
16478                 return -rte_mtr_error_set(error, ENOTSUP,
16479                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16480                                 NULL, "Red color only supports drop action.");
16481         /*
16482          * Check default policy actions:
16483          * Green/Yellow: no action, Red: drop action
16484          */
16485         if ((!actions[RTE_COLOR_GREEN] ||
16486                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
16487                 *is_def_policy = true;
16488                 return 0;
16489         }
16490         flow_err.message = NULL;
16491         for (i = 0; i < RTE_COLORS; i++) {
16492                 act = actions[i];
16493                 for (action_flags = 0, actions_n = 0;
16494                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
16495                         act++) {
16496                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
16497                                 return -rte_mtr_error_set(error, ENOTSUP,
16498                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16499                                           NULL, "too many actions");
16500                         switch (act->type) {
16501                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
16502                                 if (!priv->config.dv_esw_en)
16503                                         return -rte_mtr_error_set(error,
16504                                         ENOTSUP,
16505                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16506                                         NULL, "PORT action validate check"
16507                                         " fail for ESW disable");
16508                                 ret = flow_dv_validate_action_port_id(dev,
16509                                                 action_flags,
16510                                                 act, attr, &flow_err);
16511                                 if (ret)
16512                                         return -rte_mtr_error_set(error,
16513                                         ENOTSUP,
16514                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16515                                         NULL, flow_err.message ?
16516                                         flow_err.message :
16517                                         "PORT action validate check fail");
16518                                 ++actions_n;
16519                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
16520                                 break;
16521                         case RTE_FLOW_ACTION_TYPE_MARK:
16522                                 ret = flow_dv_validate_action_mark(dev, act,
16523                                                            action_flags,
16524                                                            attr, &flow_err);
16525                                 if (ret < 0)
16526                                         return -rte_mtr_error_set(error,
16527                                         ENOTSUP,
16528                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16529                                         NULL, flow_err.message ?
16530                                         flow_err.message :
16531                                         "Mark action validate check fail");
16532                                 if (dev_conf->dv_xmeta_en !=
16533                                         MLX5_XMETA_MODE_LEGACY)
16534                                         return -rte_mtr_error_set(error,
16535                                         ENOTSUP,
16536                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16537                                         NULL, "Extend MARK action is "
16538                                         "not supported. Please try use "
16539                                         "default policy for meter.");
16540                                 action_flags |= MLX5_FLOW_ACTION_MARK;
16541                                 ++actions_n;
16542                                 break;
16543                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
16544                                 ret = flow_dv_validate_action_set_tag(dev,
16545                                                         act, action_flags,
16546                                                         attr, &flow_err);
16547                                 if (ret)
16548                                         return -rte_mtr_error_set(error,
16549                                         ENOTSUP,
16550                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16551                                         NULL, flow_err.message ?
16552                                         flow_err.message :
16553                                         "Set tag action validate check fail");
16554                                 /*
16555                                  * Count all modify-header actions
16556                                  * as one action.
16557                                  */
16558                                 if (!(action_flags &
16559                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
16560                                         ++actions_n;
16561                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
16562                                 break;
16563                         case RTE_FLOW_ACTION_TYPE_DROP:
16564                                 ret = mlx5_flow_validate_action_drop
16565                                         (action_flags,
16566                                         attr, &flow_err);
16567                                 if (ret < 0)
16568                                         return -rte_mtr_error_set(error,
16569                                         ENOTSUP,
16570                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16571                                         NULL, flow_err.message ?
16572                                         flow_err.message :
16573                                         "Drop action validate check fail");
16574                                 action_flags |= MLX5_FLOW_ACTION_DROP;
16575                                 ++actions_n;
16576                                 break;
16577                         case RTE_FLOW_ACTION_TYPE_QUEUE:
16578                                 /*
16579                                  * Check whether extensive
16580                                  * metadata feature is engaged.
16581                                  */
16582                                 if (dev_conf->dv_flow_en &&
16583                                         (dev_conf->dv_xmeta_en !=
16584                                         MLX5_XMETA_MODE_LEGACY) &&
16585                                         mlx5_flow_ext_mreg_supported(dev))
16586                                         return -rte_mtr_error_set(error,
16587                                           ENOTSUP,
16588                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16589                                           NULL, "Queue action with meta "
16590                                           "is not supported. Please try use "
16591                                           "default policy for meter.");
16592                                 ret = mlx5_flow_validate_action_queue(act,
16593                                                         action_flags, dev,
16594                                                         attr, &flow_err);
16595                                 if (ret < 0)
16596                                         return -rte_mtr_error_set(error,
16597                                           ENOTSUP,
16598                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16599                                           NULL, flow_err.message ?
16600                                           flow_err.message :
16601                                           "Queue action validate check fail");
16602                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
16603                                 ++actions_n;
16604                                 break;
16605                         case RTE_FLOW_ACTION_TYPE_RSS:
16606                                 if (dev_conf->dv_flow_en &&
16607                                         (dev_conf->dv_xmeta_en !=
16608                                         MLX5_XMETA_MODE_LEGACY) &&
16609                                         mlx5_flow_ext_mreg_supported(dev))
16610                                         return -rte_mtr_error_set(error,
16611                                           ENOTSUP,
16612                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16613                                           NULL, "RSS action with meta "
16614                                           "is not supported. Please try use "
16615                                           "default policy for meter.");
16616                                 ret = mlx5_validate_action_rss(dev, act,
16617                                                 &flow_err);
16618                                 if (ret < 0)
16619                                         return -rte_mtr_error_set(error,
16620                                           ENOTSUP,
16621                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16622                                           NULL, flow_err.message ?
16623                                           flow_err.message :
16624                                           "RSS action validate check fail");
16625                                 action_flags |= MLX5_FLOW_ACTION_RSS;
16626                                 ++actions_n;
16627                                 *is_rss = true;
16628                                 break;
16629                         case RTE_FLOW_ACTION_TYPE_JUMP:
16630                                 ret = flow_dv_validate_action_jump(dev,
16631                                         NULL, act, action_flags,
16632                                         attr, true, &flow_err);
16633                                 if (ret)
16634                                         return -rte_mtr_error_set(error,
16635                                           ENOTSUP,
16636                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16637                                           NULL, flow_err.message ?
16638                                           flow_err.message :
16639                                           "Jump action validate check fail");
16640                                 ++actions_n;
16641                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
16642                                 break;
16643                         default:
16644                                 return -rte_mtr_error_set(error, ENOTSUP,
16645                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16646                                         NULL,
16647                                         "Doesn't support optional action");
16648                         }
16649                 }
16650                 /* Yellow is not supported, just skip. */
16651                 if (i == RTE_COLOR_YELLOW)
16652                         continue;
16653                 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
16654                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
16655                 else if ((action_flags &
16656                         (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
16657                         (action_flags & MLX5_FLOW_ACTION_MARK))
16658                         /*
16659                          * Only support MLX5_XMETA_MODE_LEGACY
16660                          * so MARK action only in ingress domain.
16661                          */
16662                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
16663                 else
16664                         domain_color[i] = def_domain;
16665                 /*
16666                  * Validate the drop action mutual exclusion
16667                  * with other actions. Drop action is mutually-exclusive
16668                  * with any other action, except for Count action.
16669                  */
16670                 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
16671                         (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
16672                         return -rte_mtr_error_set(error, ENOTSUP,
16673                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16674                                 NULL, "Drop action is mutually-exclusive "
16675                                 "with any other action");
16676                 }
16677                 /* Eswitch has few restrictions on using items and actions */
16678                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
16679                         if (!mlx5_flow_ext_mreg_supported(dev) &&
16680                                 action_flags & MLX5_FLOW_ACTION_MARK)
16681                                 return -rte_mtr_error_set(error, ENOTSUP,
16682                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16683                                         NULL, "unsupported action MARK");
16684                         if (action_flags & MLX5_FLOW_ACTION_QUEUE)
16685                                 return -rte_mtr_error_set(error, ENOTSUP,
16686                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16687                                         NULL, "unsupported action QUEUE");
16688                         if (action_flags & MLX5_FLOW_ACTION_RSS)
16689                                 return -rte_mtr_error_set(error, ENOTSUP,
16690                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16691                                         NULL, "unsupported action RSS");
16692                         if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
16693                                 return -rte_mtr_error_set(error, ENOTSUP,
16694                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16695                                         NULL, "no fate action is found");
16696                 } else {
16697                         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
16698                                 (domain_color[i] &
16699                                 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
16700                                 if ((domain_color[i] &
16701                                         MLX5_MTR_DOMAIN_EGRESS_BIT))
16702                                         domain_color[i] =
16703                                         MLX5_MTR_DOMAIN_EGRESS_BIT;
16704                                 else
16705                                         return -rte_mtr_error_set(error,
16706                                         ENOTSUP,
16707                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16708                                         NULL, "no fate action is found");
16709                         }
16710                 }
16711                 if (domain_color[i] != def_domain)
16712                         *domain_bitmap = domain_color[i];
16713         }
16714         return 0;
16715 }
16716
16717 static int
16718 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
16719 {
16720         struct mlx5_priv *priv = dev->data->dev_private;
16721         int ret = 0;
16722
16723         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
16724                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
16725                                                 flags);
16726                 if (ret != 0)
16727                         return ret;
16728         }
16729         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
16730                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
16731                 if (ret != 0)
16732                         return ret;
16733         }
16734         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
16735                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
16736                 if (ret != 0)
16737                         return ret;
16738         }
16739         return 0;
16740 }
16741
/*
 * DV (Direct Verbs/Rules) flow engine callback table.
 * Maps the generic mlx5 flow driver operations onto their DV
 * implementations defined in this file.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	/* Flow rule lifecycle. */
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	/* Meter tables and policies. */
	.create_mtr_tbls = flow_dv_create_mtr_tbls,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
	.destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
	.create_meter = flow_dv_mtr_alloc,
	.free_meter = flow_dv_aso_mtr_release_to_pool,
	.validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
	.create_mtr_acts = flow_dv_create_mtr_policy_acts,
	.destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
	.create_policy_rules = flow_dv_create_policy_rules,
	.destroy_policy_rules = flow_dv_destroy_policy_rules,
	.create_def_policy = flow_dv_create_def_policy,
	.destroy_def_policy = flow_dv_destroy_def_policy,
	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
	.destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
	/* Counters and aging. */
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
	/* Indirect (shared) actions. */
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
	/* Steering domain synchronization. */
	.sync_domain = flow_dv_sync_domain,
};
16775
16776 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
16777