/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
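/* 802.1Q TCI layout: PCP in bits 15-13, DEI in bit 12, VID in bits 11-0. */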

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};
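/* Writing attr = 0 clears all the flag bits above in a single store. */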

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer combinations except in
 * tunnel mode. In tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is a suffix flow
         * and the layer flags were set by the prefix flow. The prefix flow's
         * layer flags must be reused because the suffix flow may not carry
         * the user-defined items once the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
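                        /*
                         * A tunnel item restarts attribute collection:
                         * after decap only the inner headers matter.
                         */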
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

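/* MAC addresses are modified via two fields each: bits 47-16 and bits 15-0. */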
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size here is in bits, unlike in the other tables (bytes). */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

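/* 128-bit IPv6 addresses are split across four 32-bit modification fields. */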
struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static const struct rte_flow_item *
mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                default:
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                        return item;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
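                        /* Back-to-back IP headers indicate an IP-over-IP tunnel. */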
                        if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                            item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
                                return item;
                        break;
                }
        }
        return NULL;
}

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

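/* Return the hash list referenced by *phl, creating it on first use (thread-safe). */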
static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
                     const char *name, uint32_t size, bool direct_key,
                     bool lcores_share, void *ctx,
                     mlx5_list_create_cb cb_create,
                     mlx5_list_match_cb cb_match,
                     mlx5_list_remove_cb cb_remove,
                     mlx5_list_clone_cb cb_clone,
                     mlx5_list_clone_free_cb cb_clone_free)
{
        struct mlx5_hlist *hl;
        struct mlx5_hlist *expected = NULL;
        char s[MLX5_NAME_SIZE];

        hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        if (likely(hl))
                return hl;
        snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
        hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
                        ctx, cb_create, cb_match, cb_remove, cb_clone,
                        cb_clone_free);
        if (!hl) {
                DRV_LOG(ERR, "%s hash creation failed", name);
                rte_errno = ENOMEM;
                return NULL;
        }
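        /*
         * Publish the new list. If another thread already installed one,
         * destroy ours and use the winner's.
         */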
        if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST)) {
                mlx5_hlist_destroy(hl);
                hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        }
        return hl;
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
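 *   For example, data = {0x12, 0x34, 0x56} with size 3 yields 0x123456.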
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description, and the data bit offset and width of each action by the
 * provided item mask.
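 * For example, setting the IPv4 TTL (field info {1, 8, MLX5_MODI_OUT_IPV4_TTL},
 * mask byte 0xFF) yields a single command with bit offset 0 and length 8.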
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
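        /*
         * carry_b: source bits already consumed by a previous
         * destination field (COPY type only).
         */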
        uint32_t carry_b = 0;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                uint32_t size_b;
                uint32_t off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        carry_b = 0;
                        if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
                            dcopy->size != 0) {
                                actions[i].length =
                                        dcopy->size * CHAR_BIT - dcopy->offset;
                                carry_b = actions[i].length;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in the source field to fill the
                         * destination field. Switch to the next source.
                         */
                        if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
                            (size_b == field->size * CHAR_BIT - off_b)) {
                                actions[i].length =
                                        field->size * CHAR_BIT - off_b;
                                dcopy->offset += actions[i].length;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
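                /* E.g. X = 3: 3 * UINT32_MAX == -3 (mod 2^32). */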
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because there is
                         * no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata mode only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

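                /*
                 * Align the mark value and mask with the reg_c[0] sub-field:
                 * shift by the mask's lowest set bit in CPU byte order,
                 * then convert back to big-endian.
                 */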
                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

1335 /**
1336  * Convert modify-header set IPv4 DSCP action to DV specification.
1337  *
1338  * @param[in,out] resource
1339  *   Pointer to the modify-header resource.
1340  * @param[in] action
1341  *   Pointer to action specification.
1342  * @param[out] error
1343  *   Pointer to the error structure.
1344  *
1345  * @return
1346  *   0 on success, a negative errno value otherwise and rte_errno is set.
1347  */
1348 static int
1349 flow_dv_convert_action_modify_ipv4_dscp
1350                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1351                          const struct rte_flow_action *action,
1352                          struct rte_flow_error *error)
1353 {
1354         const struct rte_flow_action_set_dscp *conf =
1355                 (const struct rte_flow_action_set_dscp *)(action->conf);
1356         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1357         struct rte_flow_item_ipv4 ipv4;
1358         struct rte_flow_item_ipv4 ipv4_mask;
1359
1360         memset(&ipv4, 0, sizeof(ipv4));
1361         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1362         ipv4.hdr.type_of_service = conf->dscp;
1363         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1364         item.spec = &ipv4;
1365         item.mask = &ipv4_mask;
1366         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1367                                              MLX5_MODIFICATION_TYPE_SET, error);
1368 }
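
/*
 * Illustrative usage, compiled out: the SET_IPV4_DSCP action that the
 * routine above converts. The DSCP value occupies the low 6 bits, as
 * required by the mask computed above. Names are examples only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static const struct rte_flow_action_set_dscp example_set_dscp = {
        .dscp = 0x2e, /* EF PHB in the low-6-bit encoding */
};
static const struct rte_flow_action example_dscp_action = {
        .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP,
        .conf = &example_set_dscp,
};
#endif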
1369
1370 /**
1371  * Convert modify-header set IPv6 DSCP action to DV specification.
1372  *
1373  * @param[in,out] resource
1374  *   Pointer to the modify-header resource.
1375  * @param[in] action
1376  *   Pointer to action specification.
1377  * @param[out] error
1378  *   Pointer to the error structure.
1379  *
1380  * @return
1381  *   0 on success, a negative errno value otherwise and rte_errno is set.
1382  */
1383 static int
1384 flow_dv_convert_action_modify_ipv6_dscp
1385                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1386                          const struct rte_flow_action *action,
1387                          struct rte_flow_error *error)
1388 {
1389         const struct rte_flow_action_set_dscp *conf =
1390                 (const struct rte_flow_action_set_dscp *)(action->conf);
1391         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1392         struct rte_flow_item_ipv6 ipv6;
1393         struct rte_flow_item_ipv6 ipv6_mask;
1394
1395         memset(&ipv6, 0, sizeof(ipv6));
1396         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1397         /*
1398          * Even though the DSCP bit offset in IPv6 is not byte aligned,
1399          * rdma-core only accepts the DSCP bits byte aligned, in bits 0
1400          * to 5, to be compatible with IPv4; hence there is no need to
1401          * shift the bits in the IPv6 case.
1402          */
1403         ipv6.hdr.vtc_flow = conf->dscp;
1404         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1405         item.spec = &ipv6;
1406         item.mask = &ipv6_mask;
1407         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1408                                              MLX5_MODIFICATION_TYPE_SET, error);
1409 }
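
/*
 * Sanity sketch, compiled out: both conversions above reduce to the same
 * byte-aligned 6-bit DSCP mask (0x3f) that rdma-core expects, for IPv4
 * (type_of_service) and IPv6 (vtc_flow) alike.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_dscp_mask_check(void)
{
        RTE_BUILD_BUG_ON((RTE_IPV4_HDR_DSCP_MASK >> 2) != 0x3f);
        RTE_BUILD_BUG_ON((RTE_IPV6_HDR_DSCP_MASK >> 22) != 0x3f);
}
#endif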
1410
1411 static int
1412 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1413                            enum rte_flow_field_id field)
1414 {
1415         switch (field) {
1416         case RTE_FLOW_FIELD_START:
1417                 return 32;
1418         case RTE_FLOW_FIELD_MAC_DST:
1419         case RTE_FLOW_FIELD_MAC_SRC:
1420                 return 48;
1421         case RTE_FLOW_FIELD_VLAN_TYPE:
1422                 return 16;
1423         case RTE_FLOW_FIELD_VLAN_ID:
1424                 return 12;
1425         case RTE_FLOW_FIELD_MAC_TYPE:
1426                 return 16;
1427         case RTE_FLOW_FIELD_IPV4_DSCP:
1428                 return 6;
1429         case RTE_FLOW_FIELD_IPV4_TTL:
1430                 return 8;
1431         case RTE_FLOW_FIELD_IPV4_SRC:
1432         case RTE_FLOW_FIELD_IPV4_DST:
1433                 return 32;
1434         case RTE_FLOW_FIELD_IPV6_DSCP:
1435                 return 6;
1436         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1437                 return 8;
1438         case RTE_FLOW_FIELD_IPV6_SRC:
1439         case RTE_FLOW_FIELD_IPV6_DST:
1440                 return 128;
1441         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1442         case RTE_FLOW_FIELD_TCP_PORT_DST:
1443                 return 16;
1444         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1445         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1446                 return 32;
1447         case RTE_FLOW_FIELD_TCP_FLAGS:
1448                 return 9;
1449         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1450         case RTE_FLOW_FIELD_UDP_PORT_DST:
1451                 return 16;
1452         case RTE_FLOW_FIELD_VXLAN_VNI:
1453         case RTE_FLOW_FIELD_GENEVE_VNI:
1454                 return 24;
1455         case RTE_FLOW_FIELD_GTP_TEID:
1456         case RTE_FLOW_FIELD_TAG:
1457                 return 32;
1458         case RTE_FLOW_FIELD_MARK:
1459                 return 24;
1460         case RTE_FLOW_FIELD_META:
1461                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1462                         return 16;
1463                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1464                         return 32;
1465                 else
1466                         return 0;
1467         case RTE_FLOW_FIELD_POINTER:
1468         case RTE_FLOW_FIELD_VALUE:
1469                 return 64;
1470         default:
1471                 MLX5_ASSERT(false);
1472         }
1473         return 0;
1474 }
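
/*
 * Illustrative sketch, compiled out: the width returned above is used to
 * build partial-field masks of the form (all-ones >> (field_width -
 * width)), e.g. modifying only the low 8 bits of the 12-bit VLAN ID.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_vlan_vid_mask(void)
{
        uint32_t width = 8; /* number of bits requested by the action */
        uint16_t mask = rte_cpu_to_be_16(0x0fff >> (12 - width));

        /* mask == rte_cpu_to_be_16(0x00ff): low 8 VID bits only. */
        (void)mask;
}
#endif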
1475
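/**
 * Convert a flow field ID to the matching list of modify-header
 * sub-fields.
 *
 * @param[in] data
 *   Pointer to the field description (field ID, level and offset).
 * @param[out] info
 *   Array of sub-field descriptors to fill.
 * @param[out] mask
 *   Array of big-endian sub-field masks to fill, NULL on the copy side.
 * @param[out] value
 *   Array of immediate values to fill for VALUE/POINTER sources.
 * @param[in] width
 *   Number of bits to modify.
 * @param[in] dst_width
 *   Width of the destination field in bits.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to the error structure.
 */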
1476 static void
1477 mlx5_flow_field_id_to_modify_info
1478                 (const struct rte_flow_action_modify_data *data,
1479                  struct field_modify_info *info,
1480                  uint32_t *mask, uint32_t *value,
1481                  uint32_t width, uint32_t dst_width,
1482                  struct rte_eth_dev *dev,
1483                  const struct rte_flow_attr *attr,
1484                  struct rte_flow_error *error)
1485 {
1486         struct mlx5_priv *priv = dev->data->dev_private;
1487         struct mlx5_dev_config *config = &priv->config;
1488         uint32_t idx = 0;
1489         uint32_t off = 0;
1490         uint64_t val = 0;
1491         switch (data->field) {
1492         case RTE_FLOW_FIELD_START:
1493                 /* not supported yet */
1494                 MLX5_ASSERT(false);
1495                 break;
1496         case RTE_FLOW_FIELD_MAC_DST:
1497                 off = data->offset > 16 ? data->offset - 16 : 0;
1498                 if (mask) {
1499                         if (data->offset < 16) {
1500                                 info[idx] = (struct field_modify_info){2, 0,
1501                                                 MLX5_MODI_OUT_DMAC_15_0};
1502                                 if (width < 16) {
1503                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1504                                                                  (16 - width));
1505                                         width = 0;
1506                                 } else {
1507                                         mask[idx] = RTE_BE16(0xffff);
1508                                         width -= 16;
1509                                 }
1510                                 if (!width)
1511                                         break;
1512                                 ++idx;
1513                         }
1514                         info[idx] = (struct field_modify_info){4, 4 * idx,
1515                                                 MLX5_MODI_OUT_DMAC_47_16};
1516                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1517                                                       (32 - width)) << off);
1518                 } else {
1519                         if (data->offset < 16)
1520                                 info[idx++] = (struct field_modify_info){2, 0,
1521                                                 MLX5_MODI_OUT_DMAC_15_0};
1522                         info[idx] = (struct field_modify_info){4, off,
1523                                                 MLX5_MODI_OUT_DMAC_47_16};
1524                 }
1525                 break;
1526         case RTE_FLOW_FIELD_MAC_SRC:
1527                 off = data->offset > 16 ? data->offset - 16 : 0;
1528                 if (mask) {
1529                         if (data->offset < 16) {
1530                                 info[idx] = (struct field_modify_info){2, 0,
1531                                                 MLX5_MODI_OUT_SMAC_15_0};
1532                                 if (width < 16) {
1533                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1534                                                                  (16 - width));
1535                                         width = 0;
1536                                 } else {
1537                                         mask[idx] = RTE_BE16(0xffff);
1538                                         width -= 16;
1539                                 }
1540                                 if (!width)
1541                                         break;
1542                                 ++idx;
1543                         }
1544                         info[idx] = (struct field_modify_info){4, 4 * idx,
1545                                                 MLX5_MODI_OUT_SMAC_47_16};
1546                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1547                                                       (32 - width)) << off);
1548                 } else {
1549                         if (data->offset < 16)
1550                                 info[idx++] = (struct field_modify_info){2, 0,
1551                                                 MLX5_MODI_OUT_SMAC_15_0};
1552                         info[idx] = (struct field_modify_info){4, off,
1553                                                 MLX5_MODI_OUT_SMAC_47_16};
1554                 }
1555                 break;
1556         case RTE_FLOW_FIELD_VLAN_TYPE:
1557                 /* not supported yet */
1558                 break;
1559         case RTE_FLOW_FIELD_VLAN_ID:
1560                 info[idx] = (struct field_modify_info){2, 0,
1561                                         MLX5_MODI_OUT_FIRST_VID};
1562                 if (mask)
1563                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1564                 break;
1565         case RTE_FLOW_FIELD_MAC_TYPE:
1566                 info[idx] = (struct field_modify_info){2, 0,
1567                                         MLX5_MODI_OUT_ETHERTYPE};
1568                 if (mask)
1569                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1570                 break;
1571         case RTE_FLOW_FIELD_IPV4_DSCP:
1572                 info[idx] = (struct field_modify_info){1, 0,
1573                                         MLX5_MODI_OUT_IP_DSCP};
1574                 if (mask)
1575                         mask[idx] = 0x3f >> (6 - width);
1576                 break;
1577         case RTE_FLOW_FIELD_IPV4_TTL:
1578                 info[idx] = (struct field_modify_info){1, 0,
1579                                         MLX5_MODI_OUT_IPV4_TTL};
1580                 if (mask)
1581                         mask[idx] = 0xff >> (8 - width);
1582                 break;
1583         case RTE_FLOW_FIELD_IPV4_SRC:
1584                 info[idx] = (struct field_modify_info){4, 0,
1585                                         MLX5_MODI_OUT_SIPV4};
1586                 if (mask)
1587                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1588                                                      (32 - width));
1589                 break;
1590         case RTE_FLOW_FIELD_IPV4_DST:
1591                 info[idx] = (struct field_modify_info){4, 0,
1592                                         MLX5_MODI_OUT_DIPV4};
1593                 if (mask)
1594                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1595                                                      (32 - width));
1596                 break;
1597         case RTE_FLOW_FIELD_IPV6_DSCP:
1598                 info[idx] = (struct field_modify_info){1, 0,
1599                                         MLX5_MODI_OUT_IP_DSCP};
1600                 if (mask)
1601                         mask[idx] = 0x3f >> (6 - width);
1602                 break;
1603         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1604                 info[idx] = (struct field_modify_info){1, 0,
1605                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1606                 if (mask)
1607                         mask[idx] = 0xff >> (8 - width);
1608                 break;
1609         case RTE_FLOW_FIELD_IPV6_SRC:
1610                 if (mask) {
1611                         if (data->offset < 32) {
1612                                 info[idx] = (struct field_modify_info){4,
1613                                                 4 * idx,
1614                                                 MLX5_MODI_OUT_SIPV6_31_0};
1615                                 if (width < 32) {
1616                                         mask[idx] =
1617                                                 rte_cpu_to_be_32(0xffffffff >>
1618                                                                  (32 - width));
1619                                         width = 0;
1620                                 } else {
1621                                         mask[idx] = RTE_BE32(0xffffffff);
1622                                         width -= 32;
1623                                 }
1624                                 if (!width)
1625                                         break;
1626                                 ++idx;
1627                         }
1628                         if (data->offset < 64) {
1629                                 info[idx] = (struct field_modify_info){4,
1630                                                 4 * idx,
1631                                                 MLX5_MODI_OUT_SIPV6_63_32};
1632                                 if (width < 32) {
1633                                         mask[idx] =
1634                                                 rte_cpu_to_be_32(0xffffffff >>
1635                                                                  (32 - width));
1636                                         width = 0;
1637                                 } else {
1638                                         mask[idx] = RTE_BE32(0xffffffff);
1639                                         width -= 32;
1640                                 }
1641                                 if (!width)
1642                                         break;
1643                                 ++idx;
1644                         }
1645                         if (data->offset < 96) {
1646                                 info[idx] = (struct field_modify_info){4,
1647                                                 4 * idx,
1648                                                 MLX5_MODI_OUT_SIPV6_95_64};
1649                                 if (width < 32) {
1650                                         mask[idx] =
1651                                                 rte_cpu_to_be_32(0xffffffff >>
1652                                                                  (32 - width));
1653                                         width = 0;
1654                                 } else {
1655                                         mask[idx] = RTE_BE32(0xffffffff);
1656                                         width -= 32;
1657                                 }
1658                                 if (!width)
1659                                         break;
1660                                 ++idx;
1661                         }
1662                         info[idx] = (struct field_modify_info){4, 4 * idx,
1663                                                 MLX5_MODI_OUT_SIPV6_127_96};
1664                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1665                                                      (32 - width));
1666                 } else {
1667                         if (data->offset < 32)
1668                                 info[idx++] = (struct field_modify_info){4, 0,
1669                                                 MLX5_MODI_OUT_SIPV6_31_0};
1670                         if (data->offset < 64)
1671                                 info[idx++] = (struct field_modify_info){4, 0,
1672                                                 MLX5_MODI_OUT_SIPV6_63_32};
1673                         if (data->offset < 96)
1674                                 info[idx++] = (struct field_modify_info){4, 0,
1675                                                 MLX5_MODI_OUT_SIPV6_95_64};
1676                         if (data->offset < 128)
1677                                 info[idx++] = (struct field_modify_info){4, 0,
1678                                                 MLX5_MODI_OUT_SIPV6_127_96};
1679                 }
1680                 break;
1681         case RTE_FLOW_FIELD_IPV6_DST:
1682                 if (mask) {
1683                         if (data->offset < 32) {
1684                                 info[idx] = (struct field_modify_info){4,
1685                                                 4 * idx,
1686                                                 MLX5_MODI_OUT_DIPV6_31_0};
1687                                 if (width < 32) {
1688                                         mask[idx] =
1689                                                 rte_cpu_to_be_32(0xffffffff >>
1690                                                                  (32 - width));
1691                                         width = 0;
1692                                 } else {
1693                                         mask[idx] = RTE_BE32(0xffffffff);
1694                                         width -= 32;
1695                                 }
1696                                 if (!width)
1697                                         break;
1698                                 ++idx;
1699                         }
1700                         if (data->offset < 64) {
1701                                 info[idx] = (struct field_modify_info){4,
1702                                                 4 * idx,
1703                                                 MLX5_MODI_OUT_DIPV6_63_32};
1704                                 if (width < 32) {
1705                                         mask[idx] =
1706                                                 rte_cpu_to_be_32(0xffffffff >>
1707                                                                  (32 - width));
1708                                         width = 0;
1709                                 } else {
1710                                         mask[idx] = RTE_BE32(0xffffffff);
1711                                         width -= 32;
1712                                 }
1713                                 if (!width)
1714                                         break;
1715                                 ++idx;
1716                         }
1717                         if (data->offset < 96) {
1718                                 info[idx] = (struct field_modify_info){4,
1719                                                 4 * idx,
1720                                                 MLX5_MODI_OUT_DIPV6_95_64};
1721                                 if (width < 32) {
1722                                         mask[idx] =
1723                                                 rte_cpu_to_be_32(0xffffffff >>
1724                                                                  (32 - width));
1725                                         width = 0;
1726                                 } else {
1727                                         mask[idx] = RTE_BE32(0xffffffff);
1728                                         width -= 32;
1729                                 }
1730                                 if (!width)
1731                                         break;
1732                                 ++idx;
1733                         }
1734                         info[idx] = (struct field_modify_info){4, 4 * idx,
1735                                                 MLX5_MODI_OUT_DIPV6_127_96};
1736                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1737                                                      (32 - width));
1738                 } else {
1739                         if (data->offset < 32)
1740                                 info[idx++] = (struct field_modify_info){4, 0,
1741                                                 MLX5_MODI_OUT_DIPV6_31_0};
1742                         if (data->offset < 64)
1743                                 info[idx++] = (struct field_modify_info){4, 0,
1744                                                 MLX5_MODI_OUT_DIPV6_63_32};
1745                         if (data->offset < 96)
1746                                 info[idx++] = (struct field_modify_info){4, 0,
1747                                                 MLX5_MODI_OUT_DIPV6_95_64};
1748                         if (data->offset < 128)
1749                                 info[idx++] = (struct field_modify_info){4, 0,
1750                                                 MLX5_MODI_OUT_DIPV6_127_96};
1751                 }
1752                 break;
1753         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1754                 info[idx] = (struct field_modify_info){2, 0,
1755                                         MLX5_MODI_OUT_TCP_SPORT};
1756                 if (mask)
1757                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1758                 break;
1759         case RTE_FLOW_FIELD_TCP_PORT_DST:
1760                 info[idx] = (struct field_modify_info){2, 0,
1761                                         MLX5_MODI_OUT_TCP_DPORT};
1762                 if (mask)
1763                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1764                 break;
1765         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1766                 info[idx] = (struct field_modify_info){4, 0,
1767                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1768                 if (mask)
1769                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1770                                                      (32 - width));
1771                 break;
1772         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1773                 info[idx] = (struct field_modify_info){4, 0,
1774                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1775                 if (mask)
1776                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1777                                                      (32 - width));
1778                 break;
1779         case RTE_FLOW_FIELD_TCP_FLAGS:
1780                 info[idx] = (struct field_modify_info){2, 0,
1781                                         MLX5_MODI_OUT_TCP_FLAGS};
1782                 if (mask)
1783                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1784                 break;
1785         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1786                 info[idx] = (struct field_modify_info){2, 0,
1787                                         MLX5_MODI_OUT_UDP_SPORT};
1788                 if (mask)
1789                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1790                 break;
1791         case RTE_FLOW_FIELD_UDP_PORT_DST:
1792                 info[idx] = (struct field_modify_info){2, 0,
1793                                         MLX5_MODI_OUT_UDP_DPORT};
1794                 if (mask)
1795                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1796                 break;
1797         case RTE_FLOW_FIELD_VXLAN_VNI:
1798                 /* not supported yet */
1799                 break;
1800         case RTE_FLOW_FIELD_GENEVE_VNI:
1801                 /* not supported yet*/
1802                 /* not supported yet */
1803         case RTE_FLOW_FIELD_GTP_TEID:
1804                 info[idx] = (struct field_modify_info){4, 0,
1805                                         MLX5_MODI_GTP_TEID};
1806                 if (mask)
1807                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1808                                                      (32 - width));
1809                 break;
1810         case RTE_FLOW_FIELD_TAG:
1811                 {
1812                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1813                                                    data->level, error);
1814                         if (reg < 0)
1815                                 return;
1816                         MLX5_ASSERT(reg != REG_NON);
1817                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1818                         info[idx] = (struct field_modify_info){4, 0,
1819                                                 reg_to_field[reg]};
1820                         if (mask)
1821                                 mask[idx] =
1822                                         rte_cpu_to_be_32(0xffffffff >>
1823                                                          (32 - width));
1824                 }
1825                 break;
1826         case RTE_FLOW_FIELD_MARK:
1827                 {
1828                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1829                                                        0, error);
1830                         if (reg < 0)
1831                                 return;
1832                         MLX5_ASSERT(reg != REG_NON);
1833                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1834                         info[idx] = (struct field_modify_info){4, 0,
1835                                                 reg_to_field[reg]};
1836                         if (mask)
1837                                 mask[idx] =
1838                                         rte_cpu_to_be_32(0xffffffff >>
1839                                                          (32 - width));
1840                 }
1841                 break;
1842         case RTE_FLOW_FIELD_META:
1843                 {
1844                         unsigned int xmeta = config->dv_xmeta_en;
1845                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1846                         if (reg < 0)
1847                                 return;
1848                         MLX5_ASSERT(reg != REG_NON);
1849                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1850                         if (xmeta == MLX5_XMETA_MODE_META16) {
1851                                 info[idx] = (struct field_modify_info){2, 0,
1852                                                         reg_to_field[reg]};
1853                                 if (mask)
1854                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1855                                                                 (16 - width));
1856                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1857                                 info[idx] = (struct field_modify_info){4, 0,
1858                                                         reg_to_field[reg]};
1859                                 if (mask)
1860                                         mask[idx] =
1861                                                 rte_cpu_to_be_32(0xffffffff >>
1862                                                                 (32 - width));
1863                         } else {
1864                                 MLX5_ASSERT(false);
1865                         }
1866                 }
1867                 break;
1868         case RTE_FLOW_FIELD_POINTER:
1869         case RTE_FLOW_FIELD_VALUE:
1870                 if (data->field == RTE_FLOW_FIELD_POINTER)
1871                         memcpy(&val, (void *)(uintptr_t)data->value,
1872                                sizeof(uint64_t));
1873                 else
1874                         val = data->value;
1875                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1876                         if (mask[idx]) {
1877                                 if (dst_width == 48) {
1878                                         /* special case for MAC addresses */
1879                                         value[idx] = rte_cpu_to_be_16(val);
1880                                         val >>= 16;
1881                                         dst_width -= 16;
1882                                 } else if (dst_width > 16) {
1883                                         value[idx] = rte_cpu_to_be_32(val);
1884                                         val >>= 32;
1885                                 } else if (dst_width > 8) {
1886                                         value[idx] = rte_cpu_to_be_16(val);
1887                                         val >>= 16;
1888                                 } else {
1889                                         value[idx] = (uint8_t)val;
1890                                         val >>= 8;
1891                                 }
1892                                 if (!val)
1893                                         break;
1894                         }
1895                 }
1896                 break;
1897         default:
1898                 MLX5_ASSERT(false);
1899                 break;
1900         }
1901 }
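
/*
 * Illustrative sketch, compiled out: a 48-bit MAC address is covered
 * above by two hardware sub-fields, e.g. DMAC_15_0 (2 bytes) and
 * DMAC_47_16 (4 bytes), so a full-width modification emits two masks.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_dmac_split(void)
{
        uint32_t width = 48;
        uint16_t mask_15_0 = RTE_BE16(0xffff); /* bits 0..15 */
        uint32_t mask_47_16;

        width -= 16; /* 32 bits remain for the second sub-field */
        mask_47_16 = rte_cpu_to_be_32(0xffffffff >> (32 - width));
        (void)mask_15_0;
        (void)mask_47_16;
}
#endif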
1902
1903 /**
1904  * Convert modify_field action to DV specification.
1905  *
1906  * @param[in] dev
1907  *   Pointer to the rte_eth_dev structure.
1908  * @param[in,out] resource
1909  *   Pointer to the modify-header resource.
1910  * @param[in] action
1911  *   Pointer to action specification.
1912  * @param[in] attr
1913  *   Attributes of flow that includes this item.
1914  * @param[out] error
1915  *   Pointer to the error structure.
1916  *
1917  * @return
1918  *   0 on success, a negative errno value otherwise and rte_errno is set.
1919  */
1920 static int
1921 flow_dv_convert_action_modify_field
1922                         (struct rte_eth_dev *dev,
1923                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1924                          const struct rte_flow_action *action,
1925                          const struct rte_flow_attr *attr,
1926                          struct rte_flow_error *error)
1927 {
1928         struct mlx5_priv *priv = dev->data->dev_private;
1929         struct mlx5_dev_config *config = &priv->config;
1930         const struct rte_flow_action_modify_field *conf =
1931                 (const struct rte_flow_action_modify_field *)(action->conf);
1932         struct rte_flow_item item;
1933         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1934                                                                 {0, 0, 0} };
1935         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1936                                                                 {0, 0, 0} };
1937         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1938         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1939         uint32_t type;
1940         uint32_t dst_width = mlx5_flow_item_field_width(config,
1941                                                         conf->dst.field);
1942
1943         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1944                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1945                 type = MLX5_MODIFICATION_TYPE_SET;
1946                 /* For SET fill the destination field (field) first. */
1947                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1948                         value, conf->width, dst_width, dev, attr, error);
1949                 /* Then copy immediate value from source as per mask. */
1950                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1951                         value, conf->width, dst_width, dev, attr, error);
1952                 item.spec = &value;
1953         } else {
1954                 type = MLX5_MODIFICATION_TYPE_COPY;
1955                 /* For COPY fill the destination field (dcopy) without mask. */
1956                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1957                         value, conf->width, dst_width, dev, attr, error);
1958                 /* Then construct the source field (field) with mask. */
1959                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1960                         value, conf->width, dst_width, dev, attr, error);
1961         }
1962         item.mask = &mask;
1963         return flow_dv_convert_modify_action(&item,
1964                         field, dcopy, resource, type, error);
1965 }
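
/*
 * Illustrative usage, compiled out: a MODIFY_FIELD action setting the
 * IPv4 TTL from an immediate value, as handled by the SET branch above.
 * Names are examples only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static const struct rte_flow_action_modify_field example_set_ttl = {
        .operation = RTE_FLOW_MODIFY_SET,
        .dst = {
                .field = RTE_FLOW_FIELD_IPV4_TTL,
        },
        .src = {
                .field = RTE_FLOW_FIELD_VALUE,
                .value = 64,
        },
        .width = 8, /* full TTL width, see mlx5_flow_item_field_width() */
};
#endif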
1966
1967 /**
1968  * Validate MARK item.
1969  *
1970  * @param[in] dev
1971  *   Pointer to the rte_eth_dev structure.
1972  * @param[in] item
1973  *   Item specification.
1974  * @param[in] attr
1975  *   Attributes of flow that includes this item.
1976  * @param[out] error
1977  *   Pointer to error structure.
1978  *
1979  * @return
1980  *   0 on success, a negative errno value otherwise and rte_errno is set.
1981  */
1982 static int
1983 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1984                            const struct rte_flow_item *item,
1985                            const struct rte_flow_attr *attr __rte_unused,
1986                            struct rte_flow_error *error)
1987 {
1988         struct mlx5_priv *priv = dev->data->dev_private;
1989         struct mlx5_dev_config *config = &priv->config;
1990         const struct rte_flow_item_mark *spec = item->spec;
1991         const struct rte_flow_item_mark *mask = item->mask;
1992         const struct rte_flow_item_mark nic_mask = {
1993                 .id = priv->sh->dv_mark_mask,
1994         };
1995         int ret;
1996
1997         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1998                 return rte_flow_error_set(error, ENOTSUP,
1999                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2000                                           "extended metadata feature"
2001                                           " isn't enabled");
2002         if (!mlx5_flow_ext_mreg_supported(dev))
2003                 return rte_flow_error_set(error, ENOTSUP,
2004                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2005                                           "extended metadata register"
2006                                           " isn't supported");
2007         if (!nic_mask.id)
2008                 return rte_flow_error_set(error, ENOTSUP,
2009                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2010                                           "extended metadata register"
2011                                           " isn't available");
2012         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2013         if (ret < 0)
2014                 return ret;
2015         if (!spec)
2016                 return rte_flow_error_set(error, EINVAL,
2017                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2018                                           item->spec,
2019                                           "data cannot be empty");
2020         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
2021                 return rte_flow_error_set(error, EINVAL,
2022                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2023                                           &spec->id,
2024                                           "mark id exceeds the limit");
2025         if (!mask)
2026                 mask = &nic_mask;
2027         if (!mask->id)
2028                 return rte_flow_error_set(error, EINVAL,
2029                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2030                                         "mask cannot be zero");
2031
2032         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2033                                         (const uint8_t *)&nic_mask,
2034                                         sizeof(struct rte_flow_item_mark),
2035                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2036         if (ret < 0)
2037                 return ret;
2038         return 0;
2039 }
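
/*
 * Illustrative usage, compiled out: a MARK item that passes the checks
 * above. A NULL mask defaults to the device capability mask
 * (priv->sh->dv_mark_mask). Names are examples only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static const struct rte_flow_item_mark example_mark_spec = {
        .id = 42, /* must stay below MLX5_FLOW_MARK_MAX & dv_mark_mask */
};
static const struct rte_flow_item example_mark_item = {
        .type = RTE_FLOW_ITEM_TYPE_MARK,
        .spec = &example_mark_spec,
};
#endif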
2040
2041 /**
2042  * Validate META item.
2043  *
2044  * @param[in] dev
2045  *   Pointer to the rte_eth_dev structure.
2046  * @param[in] item
2047  *   Item specification.
2048  * @param[in] attr
2049  *   Attributes of flow that includes this item.
2050  * @param[out] error
2051  *   Pointer to error structure.
2052  *
2053  * @return
2054  *   0 on success, a negative errno value otherwise and rte_errno is set.
2055  */
2056 static int
2057 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
2058                            const struct rte_flow_item *item,
2059                            const struct rte_flow_attr *attr,
2060                            struct rte_flow_error *error)
2061 {
2062         struct mlx5_priv *priv = dev->data->dev_private;
2063         struct mlx5_dev_config *config = &priv->config;
2064         const struct rte_flow_item_meta *spec = item->spec;
2065         const struct rte_flow_item_meta *mask = item->mask;
2066         struct rte_flow_item_meta nic_mask = {
2067                 .data = UINT32_MAX
2068         };
2069         int reg;
2070         int ret;
2071
2072         if (!spec)
2073                 return rte_flow_error_set(error, EINVAL,
2074                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2075                                           item->spec,
2076                                           "data cannot be empty");
2077         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2078                 if (!mlx5_flow_ext_mreg_supported(dev))
2079                         return rte_flow_error_set(error, ENOTSUP,
2080                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2081                                           "extended metadata register"
2082                                           " isn't supported");
2083                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2084                 if (reg < 0)
2085                         return reg;
2086                 if (reg == REG_NON)
2087                         return rte_flow_error_set(error, ENOTSUP,
2088                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2089                                         "unavailable extended metadata register");
2090                 if (reg == REG_B)
2091                         return rte_flow_error_set(error, ENOTSUP,
2092                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2093                                           "match on reg_b "
2094                                           "isn't supported");
2095                 if (reg != REG_A)
2096                         nic_mask.data = priv->sh->dv_meta_mask;
2097         } else {
2098                 if (attr->transfer)
2099                         return rte_flow_error_set(error, ENOTSUP,
2100                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2101                                         "extended metadata feature "
2102                                         "should be enabled when "
2103                                         "meta item is requested "
2104                                         "in e-switch mode");
2105                 if (attr->ingress)
2106                         return rte_flow_error_set(error, ENOTSUP,
2107                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2108                                         "match on metadata for ingress "
2109                                         "is not supported in legacy "
2110                                         "metadata mode");
2111         }
2112         if (!mask)
2113                 mask = &rte_flow_item_meta_mask;
2114         if (!mask->data)
2115                 return rte_flow_error_set(error, EINVAL,
2116                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2117                                         "mask cannot be zero");
2118
2119         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2120                                         (const uint8_t *)&nic_mask,
2121                                         sizeof(struct rte_flow_item_meta),
2122                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2123         return ret;
2124 }
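
/*
 * Illustrative usage, compiled out: a META item as accepted above. With
 * dv_xmeta_en enabled the usable bits are limited by the metadata
 * register mask (priv->sh->dv_meta_mask). Names are examples only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static const struct rte_flow_item_meta example_meta_spec = {
        .data = 0xcafe,
};
static const struct rte_flow_item example_meta_item = {
        .type = RTE_FLOW_ITEM_TYPE_META,
        .spec = &example_meta_spec,
        .mask = &rte_flow_item_meta_mask,
};
#endif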
2125
2126 /**
2127  * Validate TAG item.
2128  *
2129  * @param[in] dev
2130  *   Pointer to the rte_eth_dev structure.
2131  * @param[in] item
2132  *   Item specification.
2133  * @param[in] attr
2134  *   Attributes of flow that includes this item.
2135  * @param[out] error
2136  *   Pointer to error structure.
2137  *
2138  * @return
2139  *   0 on success, a negative errno value otherwise and rte_errno is set.
2140  */
2141 static int
2142 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2143                           const struct rte_flow_item *item,
2144                           const struct rte_flow_attr *attr __rte_unused,
2145                           struct rte_flow_error *error)
2146 {
2147         const struct rte_flow_item_tag *spec = item->spec;
2148         const struct rte_flow_item_tag *mask = item->mask;
2149         const struct rte_flow_item_tag nic_mask = {
2150                 .data = RTE_BE32(UINT32_MAX),
2151                 .index = 0xff,
2152         };
2153         int ret;
2154
2155         if (!mlx5_flow_ext_mreg_supported(dev))
2156                 return rte_flow_error_set(error, ENOTSUP,
2157                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2158                                           "extended metadata register"
2159                                           " isn't supported");
2160         if (!spec)
2161                 return rte_flow_error_set(error, EINVAL,
2162                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2163                                           item->spec,
2164                                           "data cannot be empty");
2165         if (!mask)
2166                 mask = &rte_flow_item_tag_mask;
2167         if (!mask->data)
2168                 return rte_flow_error_set(error, EINVAL,
2169                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2170                                         "mask cannot be zero");
2171
2172         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2173                                         (const uint8_t *)&nic_mask,
2174                                         sizeof(struct rte_flow_item_tag),
2175                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2176         if (ret < 0)
2177                 return ret;
2178         if (mask->index != 0xff)
2179                 return rte_flow_error_set(error, EINVAL,
2180                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2181                                           "partial mask for tag index"
2182                                           " is not supported");
2183         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2184         if (ret < 0)
2185                 return ret;
2186         MLX5_ASSERT(ret != REG_NON);
2187         return 0;
2188 }
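
/*
 * Illustrative usage, compiled out: a TAG item as accepted above. The
 * index must be matched with the full 0xff mask, which the default
 * rte_flow_item_tag_mask provides. Names are examples only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static const struct rte_flow_item_tag example_tag_spec = {
        .data = 0x1234,
        .index = 3, /* application tag register index */
};
static const struct rte_flow_item example_tag_item = {
        .type = RTE_FLOW_ITEM_TYPE_TAG,
        .spec = &example_tag_spec,
};
#endif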
2189
2190 /**
2191  * Validate port_id item.
2192  *
2193  * @param[in] dev
2194  *   Pointer to the rte_eth_dev structure.
2195  * @param[in] item
2196  *   Item specification.
2197  * @param[in] attr
2198  *   Attributes of flow that includes this item.
2199  * @param[in] item_flags
2200  *   Bit-fields that hold the items detected until now.
2201  * @param[out] error
2202  *   Pointer to error structure.
2203  *
2204  * @return
2205  *   0 on success, a negative errno value otherwise and rte_errno is set.
2206  */
2207 static int
2208 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2209                               const struct rte_flow_item *item,
2210                               const struct rte_flow_attr *attr,
2211                               uint64_t item_flags,
2212                               struct rte_flow_error *error)
2213 {
2214         const struct rte_flow_item_port_id *spec = item->spec;
2215         const struct rte_flow_item_port_id *mask = item->mask;
2216         const struct rte_flow_item_port_id switch_mask = {
2217                         .id = 0xffffffff,
2218         };
2219         struct mlx5_priv *esw_priv;
2220         struct mlx5_priv *dev_priv;
2221         int ret;
2222
2223         if (!attr->transfer)
2224                 return rte_flow_error_set(error, EINVAL,
2225                                           RTE_FLOW_ERROR_TYPE_ITEM,
2226                                           NULL,
2227                                           "match on port id is valid only"
2228                                           " when transfer flag is enabled");
2229         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2230                 return rte_flow_error_set(error, ENOTSUP,
2231                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2232                                           "multiple source ports are not"
2233                                           " supported");
2234         if (!mask)
2235                 mask = &switch_mask;
2236         if (mask->id != 0xffffffff)
2237                 return rte_flow_error_set(error, ENOTSUP,
2238                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2239                                            mask,
2240                                            "no support for partial mask on"
2241                                            " \"id\" field");
2242         ret = mlx5_flow_item_acceptable
2243                                 (item, (const uint8_t *)mask,
2244                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2245                                  sizeof(struct rte_flow_item_port_id),
2246                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2247         if (ret)
2248                 return ret;
2249         if (!spec)
2250                 return 0;
2251         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2252         if (!esw_priv)
2253                 return rte_flow_error_set(error, rte_errno,
2254                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2255                                           "failed to obtain E-Switch info for"
2256                                           " port");
2257         dev_priv = mlx5_dev_to_eswitch_info(dev);
2258         if (!dev_priv)
2259                 return rte_flow_error_set(error, rte_errno,
2260                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2261                                           NULL,
2262                                           "failed to obtain E-Switch info");
2263         if (esw_priv->domain_id != dev_priv->domain_id)
2264                 return rte_flow_error_set(error, EINVAL,
2265                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2266                                           "cannot match on a port from a"
2267                                           " different E-Switch");
2268         return 0;
2269 }
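
/*
 * Illustrative usage, compiled out: a PORT_ID item as accepted above. It
 * is valid only with attr->transfer set, and the "id" field must be
 * matched with the full mask. Names are examples only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static const struct rte_flow_item_port_id example_port_spec = {
        .id = 1, /* DPDK ethdev port id of the source port */
};
static const struct rte_flow_item example_port_item = {
        .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
        .spec = &example_port_spec,
        .mask = &rte_flow_item_port_id_mask,
};
#endif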
2270
2271 /**
2272  * Validate VLAN item.
2273  *
2274  * @param[in] item
2275  *   Item specification.
2276  * @param[in] item_flags
2277  *   Bit-fields that hold the items detected until now.
2278  * @param[in] dev
2279  *   Ethernet device flow is being created on.
2280  * @param[out] error
2281  *   Pointer to error structure.
2282  *
2283  * @return
2284  *   0 on success, a negative errno value otherwise and rte_errno is set.
2285  */
2286 static int
2287 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2288                            uint64_t item_flags,
2289                            struct rte_eth_dev *dev,
2290                            struct rte_flow_error *error)
2291 {
2292         const struct rte_flow_item_vlan *mask = item->mask;
2293         const struct rte_flow_item_vlan nic_mask = {
2294                 .tci = RTE_BE16(UINT16_MAX),
2295                 .inner_type = RTE_BE16(UINT16_MAX),
2296                 .has_more_vlan = 1,
2297         };
2298         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2299         int ret;
2300         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2301                                         MLX5_FLOW_LAYER_INNER_L4) :
2302                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2303                                         MLX5_FLOW_LAYER_OUTER_L4);
2304         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2305                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2306
2307         if (item_flags & vlanm)
2308                 return rte_flow_error_set(error, EINVAL,
2309                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2310                                           "multiple VLAN layers not supported");
2311         else if ((item_flags & l34m) != 0)
2312                 return rte_flow_error_set(error, EINVAL,
2313                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2314                                           "VLAN cannot follow L3/L4 layer");
2315         if (!mask)
2316                 mask = &rte_flow_item_vlan_mask;
2317         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2318                                         (const uint8_t *)&nic_mask,
2319                                         sizeof(struct rte_flow_item_vlan),
2320                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2321         if (ret)
2322                 return ret;
2323         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2324                 struct mlx5_priv *priv = dev->data->dev_private;
2325
2326                 if (priv->vmwa_context) {
2327                         /*
2328                          * A non-NULL context means we have a virtual machine
2329                          * with SR-IOV enabled; we must create a VLAN interface
2330                          * to make the hypervisor set up the E-Switch vport
2331                          * context correctly. We avoid creating multiple VLAN
2332                          * interfaces, so we cannot support a VLAN tag mask.
2333                          */
2334                         return rte_flow_error_set(error, EINVAL,
2335                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2336                                                   item,
2337                                                   "VLAN tag mask is not"
2338                                                   " supported in virtual"
2339                                                   " environment");
2340                 }
2341         }
2342         return 0;
2343 }
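
/*
 * Illustrative usage, compiled out: a VLAN item restricted to the VID
 * bits. In a virtualized SR-IOV setup (vmwa_context set) only this exact
 * 0x0fff TCI mask is accepted by the check above.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static const struct rte_flow_item_vlan example_vlan_spec = {
        .tci = RTE_BE16(100), /* VID 100, PCP/DEI zero */
};
static const struct rte_flow_item_vlan example_vlan_mask = {
        .tci = RTE_BE16(0x0fff), /* match the VID bits only */
};
#endif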
2344
2345 /*
2346  * GTP flags are contained in 1 byte of the format:
2347  * -------------------------------------------
2348  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2349  * |-----------------------------------------|
2350  * | value | Version | PT | Res | E | S | PN |
2351  * -------------------------------------------
2352  *
2353  * Matching is supported only for GTP flags E, S, PN.
2354  */
2355 #define MLX5_GTP_FLAGS_MASK     0x07
2356
2357 /**
2358  * Validate GTP item.
2359  *
2360  * @param[in] dev
2361  *   Pointer to the rte_eth_dev structure.
2362  * @param[in] item
2363  *   Item specification.
2364  * @param[in] item_flags
2365  *   Bit-fields that hold the items detected until now.
2366  * @param[out] error
2367  *   Pointer to error structure.
2368  *
2369  * @return
2370  *   0 on success, a negative errno value otherwise and rte_errno is set.
2371  */
2372 static int
2373 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2374                           const struct rte_flow_item *item,
2375                           uint64_t item_flags,
2376                           struct rte_flow_error *error)
2377 {
2378         struct mlx5_priv *priv = dev->data->dev_private;
2379         const struct rte_flow_item_gtp *spec = item->spec;
2380         const struct rte_flow_item_gtp *mask = item->mask;
2381         const struct rte_flow_item_gtp nic_mask = {
2382                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2383                 .msg_type = 0xff,
2384                 .teid = RTE_BE32(0xffffffff),
2385         };
2386
2387         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2388                 return rte_flow_error_set(error, ENOTSUP,
2389                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2390                                           "GTP support is not enabled");
2391         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2392                 return rte_flow_error_set(error, ENOTSUP,
2393                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2394                                           "multiple tunnel layers not"
2395                                           " supported");
2396         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2397                 return rte_flow_error_set(error, EINVAL,
2398                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2399                                           "no outer UDP layer found");
2400         if (!mask)
2401                 mask = &rte_flow_item_gtp_mask;
2402         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2403                 return rte_flow_error_set(error, ENOTSUP,
2404                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2405                                           "Match is supported for GTP"
2406                                           " flags only");
2407         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2408                                          (const uint8_t *)&nic_mask,
2409                                          sizeof(struct rte_flow_item_gtp),
2410                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2411 }
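
/*
 * Illustrative usage, compiled out: matching a GTP-U tunnel by TEID.
 * Flag matching is limited to MLX5_GTP_FLAGS_MASK, and an outer UDP
 * layer must precede this item. Names are examples only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static const struct rte_flow_item_gtp example_gtp_spec = {
        .teid = RTE_BE32(0x1234),
};
static const struct rte_flow_item_gtp example_gtp_mask = {
        .teid = RTE_BE32(0xffffffff),
};
#endif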
2412
2413 /**
2414  * Validate GTP PSC item.
2415  *
2416  * @param[in] item
2417  *   Item specification.
2418  * @param[in] last_item
2419  *   Previously validated item in the pattern items.
2420  * @param[in] gtp_item
2421  *   Previous GTP item specification.
2422  * @param[in] attr
2423  *   Pointer to flow attributes.
2424  * @param[out] error
2425  *   Pointer to error structure.
2426  *
2427  * @return
2428  *   0 on success, a negative errno value otherwise and rte_errno is set.
2429  */
2430 static int
2431 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2432                               uint64_t last_item,
2433                               const struct rte_flow_item *gtp_item,
2434                               const struct rte_flow_attr *attr,
2435                               struct rte_flow_error *error)
2436 {
2437         const struct rte_flow_item_gtp *gtp_spec;
2438         const struct rte_flow_item_gtp *gtp_mask;
2439         const struct rte_flow_item_gtp_psc *spec;
2440         const struct rte_flow_item_gtp_psc *mask;
2441         const struct rte_flow_item_gtp_psc nic_mask = {
2442                 .pdu_type = 0xFF,
2443                 .qfi = 0xFF,
2444         };
2445
2446         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2447                 return rte_flow_error_set
2448                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2449                          "GTP PSC item must be preceded by a GTP item");
2450         gtp_spec = gtp_item->spec;
2451         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2452         /* GTP spec is present and the E flag is requested to match zero. */
2453         if (gtp_spec &&
2454                 (gtp_mask->v_pt_rsv_flags &
2455                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2456                 return rte_flow_error_set
2457                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2458                          "GTP E flag must be 1 to match GTP PSC");
2459         /* Check the flow is not created in group zero. */
2460         if (!attr->transfer && !attr->group)
2461                 return rte_flow_error_set
2462                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2463                          "GTP PSC is not supported for group 0");
2464         /* The remaining checks require the GTP PSC item spec. */
2465         if (!item->spec)
2466                 return 0;
2467         spec = item->spec;
2468         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2469         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2470                 return rte_flow_error_set
2471                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2472                          "PDU type should be smaller than 16");
2473         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2474                                          (const uint8_t *)&nic_mask,
2475                                          sizeof(struct rte_flow_item_gtp_psc),
2476                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2477 }
2478
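/*
 * Illustrative sketch (editor's addition, not part of the driver): a GTP
 * plus GTP PSC pattern fragment that passes the validation above. The
 * guard macro and all names/values here are assumptions for demonstration
 * only - the point is that the GTP item matches the E flag as 1 before
 * the GTP PSC item appears, and the PDU type stays within range.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES /* Hypothetical guard, never defined. */
static const struct rte_flow_item_gtp example_gtp_spec = {
        .v_pt_rsv_flags = MLX5_GTP_EXT_HEADER_FLAG, /* E flag set to 1. */
        .teid = RTE_BE32(0x1234),
};
static const struct rte_flow_item_gtp_psc example_gtp_psc_spec = {
        .pdu_type = 1, /* Must not exceed MLX5_GTP_EXT_MAX_PDU_TYPE. */
        .qfi = 0x09,
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */
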
2479 /**
2480  * Validate IPV4 item.
2481  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
 * add specific validation of the fragment_offset field.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Bit-field of the previously validated pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[out] error
 *   Pointer to error structure.
2490  *
2491  * @return
2492  *   0 on success, a negative errno value otherwise and rte_errno is set.
2493  */
2494 static int
2495 flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
2496                            const struct rte_flow_item *item,
2497                            uint64_t item_flags, uint64_t last_item,
2498                            uint16_t ether_type, struct rte_flow_error *error)
2499 {
2500         int ret;
2501         struct mlx5_priv *priv = dev->data->dev_private;
2502         const struct rte_flow_item_ipv4 *spec = item->spec;
2503         const struct rte_flow_item_ipv4 *last = item->last;
2504         const struct rte_flow_item_ipv4 *mask = item->mask;
2505         rte_be16_t fragment_offset_spec = 0;
2506         rte_be16_t fragment_offset_last = 0;
2507         struct rte_flow_item_ipv4 nic_ipv4_mask = {
2508                 .hdr = {
2509                         .src_addr = RTE_BE32(0xffffffff),
2510                         .dst_addr = RTE_BE32(0xffffffff),
2511                         .type_of_service = 0xff,
2512                         .fragment_offset = RTE_BE16(0xffff),
2513                         .next_proto_id = 0xff,
2514                         .time_to_live = 0xff,
2515                 },
2516         };
2517
2518         if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
2519                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2520                 bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
2521                                priv->config.hca_attr.inner_ipv4_ihl;
2522                 if (!ihl_cap)
2523                         return rte_flow_error_set(error, ENOTSUP,
2524                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2525                                                   item,
2526                                                   "IPV4 ihl offload not supported");
2527                 nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
2528         }
2529         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2530                                            ether_type, &nic_ipv4_mask,
2531                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2532         if (ret < 0)
2533                 return ret;
2534         if (spec && mask)
2535                 fragment_offset_spec = spec->hdr.fragment_offset &
2536                                        mask->hdr.fragment_offset;
2537         if (!fragment_offset_spec)
2538                 return 0;
2539         /*
         * Spec and mask are valid; enforce using the full mask to make
         * sure the complete value is matched correctly.
2542          */
2543         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2544                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2545                 return rte_flow_error_set(error, EINVAL,
2546                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2547                                           item, "must use full mask for"
2548                                           " fragment_offset");
2549         /*
2550          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
         * indicating this is the first fragment of a fragmented packet.
         * This is not yet supported in MLX5; return an appropriate error.
2553          */
2554         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2555                 return rte_flow_error_set(error, ENOTSUP,
2556                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2557                                           "match on first fragment not "
2558                                           "supported");
2559         if (fragment_offset_spec && !last)
2560                 return rte_flow_error_set(error, ENOTSUP,
2561                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2562                                           "specified value not supported");
2563         /* spec and last are valid, validate the specified range. */
2564         fragment_offset_last = last->hdr.fragment_offset &
2565                                mask->hdr.fragment_offset;
2566         /*
2567          * Match on fragment_offset spec 0x2001 and last 0x3fff
2568          * means MF is 1 and frag-offset is > 0.
         * This packet is a second or later fragment, excluding the last.
         * This is not yet supported in MLX5; return an appropriate
         * error message.
2572          */
2573         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2574             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2575                 return rte_flow_error_set(error, ENOTSUP,
2576                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2577                                           last, "match on following "
2578                                           "fragments not supported");
2579         /*
2580          * Match on fragment_offset spec 0x0001 and last 0x1fff
2581          * means MF is 0 and frag-offset is > 0.
         * This packet is the last fragment of a fragmented packet.
         * This is not yet supported in MLX5; return an appropriate
         * error message.
2585          */
2586         if (fragment_offset_spec == RTE_BE16(1) &&
2587             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2588                 return rte_flow_error_set(error, ENOTSUP,
2589                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2590                                           last, "match on last "
2591                                           "fragment not supported");
2592         /*
2593          * Match on fragment_offset spec 0x0001 and last 0x3fff
2594          * means MF and/or frag-offset is not 0.
2595          * This is a fragmented packet.
2596          * Other range values are invalid and rejected.
2597          */
2598         if (!(fragment_offset_spec == RTE_BE16(1) &&
2599               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2600                 return rte_flow_error_set(error, ENOTSUP,
2601                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2602                                           "specified range not supported");
2603         return 0;
2604 }
2605
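/*
 * Illustrative sketch (editor's addition, not part of the driver): the only
 * fragment_offset range accepted by the checks above is spec 0x0001 with
 * last 0x3fff under a full mask, i.e. "any fragment of a fragmented
 * packet". The guard macro and names are assumptions for demonstration.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES /* Hypothetical guard, never defined. */
static const struct rte_flow_item_ipv4 example_ipv4_frag_spec = {
        .hdr = { .fragment_offset = RTE_BE16(1) },
};
static const struct rte_flow_item_ipv4 example_ipv4_frag_last = {
        .hdr = { .fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK) },
};
static const struct rte_flow_item_ipv4 example_ipv4_frag_mask = {
        .hdr = { .fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK) },
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */
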
2606 /**
2607  * Validate IPV6 fragment extension item.
2608  *
2609  * @param[in] item
2610  *   Item specification.
2611  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
2613  * @param[out] error
2614  *   Pointer to error structure.
2615  *
2616  * @return
2617  *   0 on success, a negative errno value otherwise and rte_errno is set.
2618  */
2619 static int
2620 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2621                                     uint64_t item_flags,
2622                                     struct rte_flow_error *error)
2623 {
2624         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2625         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2626         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2627         rte_be16_t frag_data_spec = 0;
2628         rte_be16_t frag_data_last = 0;
2629         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2630         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2631                                       MLX5_FLOW_LAYER_OUTER_L4;
2632         int ret = 0;
2633         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2634                 .hdr = {
2635                         .next_header = 0xff,
2636                         .frag_data = RTE_BE16(0xffff),
2637                 },
2638         };
2639
2640         if (item_flags & l4m)
2641                 return rte_flow_error_set(error, EINVAL,
2642                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2643                                           "ipv6 fragment extension item cannot "
2644                                           "follow L4 item.");
2645         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2646             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2647                 return rte_flow_error_set(error, EINVAL,
2648                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2649                                           "ipv6 fragment extension item must "
2650                                           "follow ipv6 item");
2651         if (spec && mask)
2652                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2653         if (!frag_data_spec)
2654                 return 0;
2655         /*
         * Spec and mask are valid; enforce using the full mask to make
         * sure the complete value is matched correctly.
2658          */
2659         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2660                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2661                 return rte_flow_error_set(error, EINVAL,
2662                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2663                                           item, "must use full mask for"
2664                                           " frag_data");
2665         /*
         * Match on frag_data 0x0001 means M is 1 and frag-offset is 0,
         * i.e. the first fragment of a fragmented packet.
2668          */
2669         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2670                 return rte_flow_error_set(error, ENOTSUP,
2671                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2672                                           "match on first fragment not "
2673                                           "supported");
2674         if (frag_data_spec && !last)
2675                 return rte_flow_error_set(error, EINVAL,
2676                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2677                                           "specified value not supported");
2678         ret = mlx5_flow_item_acceptable
2679                                 (item, (const uint8_t *)mask,
2680                                  (const uint8_t *)&nic_mask,
2681                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2682                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2683         if (ret)
2684                 return ret;
2685         /* spec and last are valid, validate the specified range. */
2686         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2687         /*
2688          * Match on frag_data spec 0x0009 and last 0xfff9
2689          * means M is 1 and frag-offset is > 0.
         * This packet is a second or later fragment, excluding the last.
         * This is not yet supported in MLX5; return an appropriate
         * error message.
2693          */
2694         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2695                                        RTE_IPV6_EHDR_MF_MASK) &&
2696             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2697                 return rte_flow_error_set(error, ENOTSUP,
2698                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2699                                           last, "match on following "
2700                                           "fragments not supported");
2701         /*
2702          * Match on frag_data spec 0x0008 and last 0xfff8
2703          * means M is 0 and frag-offset is > 0.
         * This packet is the last fragment of a fragmented packet.
         * This is not yet supported in MLX5; return an appropriate
         * error message.
2707          */
2708         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2709             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2710                 return rte_flow_error_set(error, ENOTSUP,
2711                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2712                                           last, "match on last "
2713                                           "fragment not supported");
2714         /* Other range values are invalid and rejected. */
2715         return rte_flow_error_set(error, EINVAL,
2716                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2717                                   "specified range not supported");
2718 }
2719
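/*
 * Illustrative sketch (editor's addition, not part of the driver): given
 * the checks above, "packet carries an IPv6 fragment header" is matched by
 * simply placing an IPV6_FRAG_EXT item after the IPV6 item, with no
 * frag_data spec. The guard macro and names are assumptions.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES /* Hypothetical guard, never defined. */
static const struct rte_flow_item example_ipv6_frag_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
        { .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */
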
/**
2721  * Validate ASO CT item.
2722  *
2723  * @param[in] dev
2724  *   Pointer to the rte_eth_dev structure.
2725  * @param[in] item
2726  *   Item specification.
2727  * @param[in] item_flags
 *   Pointer to bit-fields that hold the items detected until now.
2729  * @param[out] error
2730  *   Pointer to error structure.
2731  *
2732  * @return
2733  *   0 on success, a negative errno value otherwise and rte_errno is set.
2734  */
2735 static int
2736 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2737                              const struct rte_flow_item *item,
2738                              uint64_t *item_flags,
2739                              struct rte_flow_error *error)
2740 {
2741         const struct rte_flow_item_conntrack *spec = item->spec;
2742         const struct rte_flow_item_conntrack *mask = item->mask;
        uint32_t flags;

        RTE_SET_USED(dev);
2746         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2747                 return rte_flow_error_set(error, EINVAL,
2748                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2749                                           "Only one CT is supported");
2750         if (!mask)
2751                 mask = &rte_flow_item_conntrack_mask;
        /* A spec is mandatory here; a NULL spec would be dereferenced. */
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "CT item spec is required");
        flags = spec->flags & mask->flags;
2753         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2754             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2755              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2756              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2757                 return rte_flow_error_set(error, EINVAL,
2758                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "Conflicting status bits");
2760         /* State change also needs to be considered. */
2761         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2762         return 0;
2763 }
2764
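/*
 * Illustrative sketch (editor's addition, not part of the driver): a
 * conntrack item spec that passes the conflict check above - the VALID
 * bit must not be combined with INVALID, BAD or DISABLED. Guard macro and
 * names are assumptions for demonstration only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES /* Hypothetical guard, never defined. */
static const struct rte_flow_item_conntrack example_ct_spec = {
        .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */
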
2765 /**
2766  * Validate the pop VLAN action.
2767  *
2768  * @param[in] dev
2769  *   Pointer to the rte_eth_dev structure.
2770  * @param[in] action_flags
2771  *   Holds the actions detected until now.
2772  * @param[in] action
2773  *   Pointer to the pop vlan action.
2774  * @param[in] item_flags
2775  *   The items found in this flow rule.
2776  * @param[in] attr
2777  *   Pointer to flow attributes.
2778  * @param[out] error
2779  *   Pointer to error structure.
2780  *
2781  * @return
2782  *   0 on success, a negative errno value otherwise and rte_errno is set.
2783  */
2784 static int
2785 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2786                                  uint64_t action_flags,
2787                                  const struct rte_flow_action *action,
2788                                  uint64_t item_flags,
2789                                  const struct rte_flow_attr *attr,
2790                                  struct rte_flow_error *error)
2791 {
2792         const struct mlx5_priv *priv = dev->data->dev_private;
2793
2796         if (!priv->sh->pop_vlan_action)
2797                 return rte_flow_error_set(error, ENOTSUP,
2798                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2799                                           NULL,
2800                                           "pop vlan action is not supported");
2801         if (attr->egress)
2802                 return rte_flow_error_set(error, ENOTSUP,
2803                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2804                                           NULL,
2805                                           "pop vlan action not supported for "
2806                                           "egress");
2807         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2808                 return rte_flow_error_set(error, ENOTSUP,
2809                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2810                                           "no support for multiple VLAN "
2811                                           "actions");
2812         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2813         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2814             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2815                 return rte_flow_error_set(error, ENOTSUP,
2816                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2817                                           NULL,
2818                                           "cannot pop vlan after decap without "
2819                                           "match on inner vlan in the flow");
2820         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2821         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2822             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2823                 return rte_flow_error_set(error, ENOTSUP,
2824                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2825                                           NULL,
2826                                           "cannot pop vlan without a "
2827                                           "match on (outer) vlan in the flow");
2828         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2829                 return rte_flow_error_set(error, EINVAL,
2830                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2831                                           "wrong action order, port_id should "
2832                                           "be after pop VLAN action");
2833         if (!attr->transfer && priv->representor)
2834                 return rte_flow_error_set(error, ENOTSUP,
2835                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2836                                           "pop vlan action for VF representor "
2837                                           "not supported on NIC table");
2838         return 0;
2839 }
2840
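/*
 * Illustrative sketch (editor's addition, not part of the driver): an
 * action list satisfying the ordering rules above - the flow must match a
 * VLAN layer, the pop comes before the fate action, and a port_id action
 * (if any) would have to come after it. Guard macro and names are
 * assumptions for demonstration only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES /* Hypothetical guard, never defined. */
static const struct rte_flow_action_queue example_pop_vlan_queue = {
        .index = 0,
};
static const struct rte_flow_action example_pop_vlan_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE,
          .conf = &example_pop_vlan_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */
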
2841 /**
2842  * Get VLAN default info from vlan match info.
2843  *
2844  * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to the VLAN info to fill in.
2851  */
2852 static void
2853 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2854                                   struct rte_vlan_hdr *vlan)
2855 {
2856         const struct rte_flow_item_vlan nic_mask = {
2857                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2858                                 MLX5DV_FLOW_VLAN_VID_MASK),
2859                 .inner_type = RTE_BE16(0xffff),
2860         };
2861
2862         if (items == NULL)
2863                 return;
2864         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2865                 int type = items->type;
2866
2867                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2868                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2869                         break;
2870         }
2871         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2872                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2873                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2874
2875                 /* If VLAN item in pattern doesn't contain data, return here. */
2876                 if (!vlan_v)
2877                         return;
2878                 if (!vlan_m)
2879                         vlan_m = &nic_mask;
2880                 /* Only full match values are accepted */
2881                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2882                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2883                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2884                         vlan->vlan_tci |=
2885                                 rte_be_to_cpu_16(vlan_v->tci &
2886                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2887                 }
2888                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2889                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2890                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2891                         vlan->vlan_tci |=
2892                                 rte_be_to_cpu_16(vlan_v->tci &
2893                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2894                 }
2895                 if (vlan_m->inner_type == nic_mask.inner_type)
2896                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2897                                                            vlan_m->inner_type);
2898         }
2899 }
2900
2901 /**
2902  * Validate the push VLAN action.
2903  *
2904  * @param[in] dev
2905  *   Pointer to the rte_eth_dev structure.
2906  * @param[in] action_flags
2907  *   Holds the actions detected until now.
 * @param[in] vlan_m
 *   VLAN item mask from the flow pattern, or NULL if none.
2910  * @param[in] action
2911  *   Pointer to the action structure.
2912  * @param[in] attr
2913  *   Pointer to flow attributes
2914  * @param[out] error
2915  *   Pointer to error structure.
2916  *
2917  * @return
2918  *   0 on success, a negative errno value otherwise and rte_errno is set.
2919  */
2920 static int
2921 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2922                                   uint64_t action_flags,
2923                                   const struct rte_flow_item_vlan *vlan_m,
2924                                   const struct rte_flow_action *action,
2925                                   const struct rte_flow_attr *attr,
2926                                   struct rte_flow_error *error)
2927 {
2928         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2929         const struct mlx5_priv *priv = dev->data->dev_private;
2930
2931         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2932             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2933                 return rte_flow_error_set(error, EINVAL,
2934                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2935                                           "invalid vlan ethertype");
2936         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2937                 return rte_flow_error_set(error, EINVAL,
2938                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2939                                           "wrong action order, port_id should "
2940                                           "be after push VLAN");
2941         if (!attr->transfer && priv->representor)
2942                 return rte_flow_error_set(error, ENOTSUP,
2943                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2944                                           "push vlan action for VF representor "
2945                                           "not supported on NIC table");
2946         if (vlan_m &&
2947             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2948             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2949                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2950             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2951             !(mlx5_flow_find_action
2952                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2953                 return rte_flow_error_set(error, EINVAL,
2954                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2955                                           "not full match mask on VLAN PCP and "
2956                                           "there is no of_set_vlan_pcp action, "
2957                                           "push VLAN action cannot figure out "
2958                                           "PCP value");
2959         if (vlan_m &&
2960             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2961             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2962                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2963             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2964             !(mlx5_flow_find_action
2965                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2966                 return rte_flow_error_set(error, EINVAL,
2967                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2968                                           "not full match mask on VLAN VID and "
2969                                           "there is no of_set_vlan_vid action, "
2970                                           "push VLAN action cannot figure out "
2971                                           "VID value");
2973         return 0;
2974 }
2975
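/*
 * Illustrative sketch (editor's addition, not part of the driver): a push
 * VLAN action paired with explicit VID and PCP setters, so the validation
 * above never has to infer them from a partial VLAN match. Guard macro
 * and values are assumptions for demonstration only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES /* Hypothetical guard, never defined. */
static const struct rte_flow_action_of_push_vlan example_push_vlan = {
        .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};
static const struct rte_flow_action_of_set_vlan_vid example_set_vid = {
        .vlan_vid = RTE_BE16(100), /* Must not exceed 0xFFE. */
};
static const struct rte_flow_action_of_set_vlan_pcp example_set_pcp = {
        .vlan_pcp = 3, /* Must not exceed 7. */
};
static const struct rte_flow_action example_push_vlan_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
          .conf = &example_push_vlan },
        { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
          .conf = &example_set_vid },
        { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
          .conf = &example_set_pcp },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */
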
2976 /**
2977  * Validate the set VLAN PCP.
2978  *
2979  * @param[in] action_flags
2980  *   Holds the actions detected until now.
2981  * @param[in] actions
2982  *   Pointer to the list of actions remaining in the flow rule.
2983  * @param[out] error
2984  *   Pointer to error structure.
2985  *
2986  * @return
2987  *   0 on success, a negative errno value otherwise and rte_errno is set.
2988  */
2989 static int
2990 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2991                                      const struct rte_flow_action actions[],
2992                                      struct rte_flow_error *error)
2993 {
2994         const struct rte_flow_action *action = actions;
2995         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2996
2997         if (conf->vlan_pcp > 7)
2998                 return rte_flow_error_set(error, EINVAL,
2999                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3000                                           "VLAN PCP value is too big");
3001         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
3002                 return rte_flow_error_set(error, ENOTSUP,
3003                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3004                                           "set VLAN PCP action must follow "
3005                                           "the push VLAN action");
3006         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
3007                 return rte_flow_error_set(error, ENOTSUP,
3008                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "Multiple VLAN PCP modifications are "
3010                                           "not supported");
3011         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3012                 return rte_flow_error_set(error, EINVAL,
3013                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3014                                           "wrong action order, port_id should "
3015                                           "be after set VLAN PCP");
3016         return 0;
3017 }
3018
3019 /**
3020  * Validate the set VLAN VID.
3021  *
3022  * @param[in] item_flags
3023  *   Holds the items detected in this rule.
3024  * @param[in] action_flags
3025  *   Holds the actions detected until now.
3026  * @param[in] actions
3027  *   Pointer to the list of actions remaining in the flow rule.
3028  * @param[out] error
3029  *   Pointer to error structure.
3030  *
3031  * @return
3032  *   0 on success, a negative errno value otherwise and rte_errno is set.
3033  */
3034 static int
3035 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
3036                                      uint64_t action_flags,
3037                                      const struct rte_flow_action actions[],
3038                                      struct rte_flow_error *error)
3039 {
3040         const struct rte_flow_action *action = actions;
3041         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
3042
3043         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
3044                 return rte_flow_error_set(error, EINVAL,
3045                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3046                                           "VLAN VID value is too big");
3047         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3048             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3049                 return rte_flow_error_set(error, ENOTSUP,
3050                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3051                                           "set VLAN VID action must follow push"
3052                                           " VLAN action or match on VLAN item");
3053         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3054                 return rte_flow_error_set(error, ENOTSUP,
3055                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3056                                           "Multiple VLAN VID modifications are "
3057                                           "not supported");
3058         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3059                 return rte_flow_error_set(error, EINVAL,
3060                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3061                                           "wrong action order, port_id should "
3062                                           "be after set VLAN VID");
3063         return 0;
3064 }
3065
/**
3067  * Validate the FLAG action.
3068  *
3069  * @param[in] dev
3070  *   Pointer to the rte_eth_dev structure.
3071  * @param[in] action_flags
3072  *   Holds the actions detected until now.
3073  * @param[in] attr
3074  *   Pointer to flow attributes
3075  * @param[out] error
3076  *   Pointer to error structure.
3077  *
3078  * @return
3079  *   0 on success, a negative errno value otherwise and rte_errno is set.
3080  */
3081 static int
3082 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3083                              uint64_t action_flags,
3084                              const struct rte_flow_attr *attr,
3085                              struct rte_flow_error *error)
3086 {
3087         struct mlx5_priv *priv = dev->data->dev_private;
3088         struct mlx5_dev_config *config = &priv->config;
3089         int ret;
3090
3091         /* Fall back if no extended metadata register support. */
3092         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3093                 return mlx5_flow_validate_action_flag(action_flags, attr,
3094                                                       error);
3095         /* Extensive metadata mode requires registers. */
3096         if (!mlx5_flow_ext_mreg_supported(dev))
3097                 return rte_flow_error_set(error, ENOTSUP,
3098                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3099                                           "no metadata registers "
3100                                           "to support flag action");
3101         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3102                 return rte_flow_error_set(error, ENOTSUP,
3103                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3104                                           "extended metadata register"
3105                                           " isn't available");
3106         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3107         if (ret < 0)
3108                 return ret;
3109         MLX5_ASSERT(ret > 0);
3110         if (action_flags & MLX5_FLOW_ACTION_MARK)
3111                 return rte_flow_error_set(error, EINVAL,
3112                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3113                                           "can't mark and flag in same flow");
3114         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3115                 return rte_flow_error_set(error, EINVAL,
3116                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3117                                           "can't have 2 flag"
3118                                           " actions in same flow");
3119         return 0;
3120 }
3121
3122 /**
3123  * Validate MARK action.
3124  *
3125  * @param[in] dev
3126  *   Pointer to the rte_eth_dev structure.
3127  * @param[in] action
3128  *   Pointer to action.
3129  * @param[in] action_flags
3130  *   Holds the actions detected until now.
3131  * @param[in] attr
3132  *   Pointer to flow attributes
3133  * @param[out] error
3134  *   Pointer to error structure.
3135  *
3136  * @return
3137  *   0 on success, a negative errno value otherwise and rte_errno is set.
3138  */
3139 static int
3140 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3141                              const struct rte_flow_action *action,
3142                              uint64_t action_flags,
3143                              const struct rte_flow_attr *attr,
3144                              struct rte_flow_error *error)
3145 {
3146         struct mlx5_priv *priv = dev->data->dev_private;
3147         struct mlx5_dev_config *config = &priv->config;
3148         const struct rte_flow_action_mark *mark = action->conf;
3149         int ret;
3150
3151         if (is_tunnel_offload_active(dev))
3152                 return rte_flow_error_set(error, ENOTSUP,
3153                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3154                                           "no mark action "
3155                                           "if tunnel offload active");
3156         /* Fall back if no extended metadata register support. */
3157         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3158                 return mlx5_flow_validate_action_mark(action, action_flags,
3159                                                       attr, error);
3160         /* Extensive metadata mode requires registers. */
3161         if (!mlx5_flow_ext_mreg_supported(dev))
3162                 return rte_flow_error_set(error, ENOTSUP,
3163                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3164                                           "no metadata registers "
3165                                           "to support mark action");
3166         if (!priv->sh->dv_mark_mask)
3167                 return rte_flow_error_set(error, ENOTSUP,
3168                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3169                                           "extended metadata register"
3170                                           " isn't available");
3171         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3172         if (ret < 0)
3173                 return ret;
3174         MLX5_ASSERT(ret > 0);
3175         if (!mark)
3176                 return rte_flow_error_set(error, EINVAL,
3177                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3178                                           "configuration cannot be null");
3179         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3180                 return rte_flow_error_set(error, EINVAL,
3181                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3182                                           &mark->id,
3183                                           "mark id exceeds the limit");
3184         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3185                 return rte_flow_error_set(error, EINVAL,
3186                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3187                                           "can't flag and mark in same flow");
3188         if (action_flags & MLX5_FLOW_ACTION_MARK)
3189                 return rte_flow_error_set(error, EINVAL,
3190                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3191                                           "can't have 2 mark actions in same"
3192                                           " flow");
3193         return 0;
3194 }
3195
3196 /**
3197  * Validate SET_META action.
3198  *
3199  * @param[in] dev
3200  *   Pointer to the rte_eth_dev structure.
3201  * @param[in] action
3202  *   Pointer to the action structure.
3203  * @param[in] action_flags
3204  *   Holds the actions detected until now.
3205  * @param[in] attr
3206  *   Pointer to flow attributes
3207  * @param[out] error
3208  *   Pointer to error structure.
3209  *
3210  * @return
3211  *   0 on success, a negative errno value otherwise and rte_errno is set.
3212  */
3213 static int
3214 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3215                                  const struct rte_flow_action *action,
3216                                  uint64_t action_flags __rte_unused,
3217                                  const struct rte_flow_attr *attr,
3218                                  struct rte_flow_error *error)
3219 {
3220         const struct rte_flow_action_set_meta *conf;
3221         uint32_t nic_mask = UINT32_MAX;
3222         int reg;
3223
3224         if (!mlx5_flow_ext_mreg_supported(dev))
3225                 return rte_flow_error_set(error, ENOTSUP,
3226                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3227                                           "extended metadata register"
3228                                           " isn't supported");
3229         reg = flow_dv_get_metadata_reg(dev, attr, error);
3230         if (reg < 0)
3231                 return reg;
3232         if (reg == REG_NON)
3233                 return rte_flow_error_set(error, ENOTSUP,
3234                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "unavailable extended metadata register");
3236         if (reg != REG_A && reg != REG_B) {
3237                 struct mlx5_priv *priv = dev->data->dev_private;
3238
3239                 nic_mask = priv->sh->dv_meta_mask;
3240         }
3241         if (!(action->conf))
3242                 return rte_flow_error_set(error, EINVAL,
3243                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3244                                           "configuration cannot be null");
3245         conf = (const struct rte_flow_action_set_meta *)action->conf;
3246         if (!conf->mask)
3247                 return rte_flow_error_set(error, EINVAL,
3248                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3249                                           "zero mask doesn't have any effect");
3250         if (conf->mask & ~nic_mask)
3251                 return rte_flow_error_set(error, EINVAL,
3252                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "metadata must be within reg C0");
3254         return 0;
3255 }
3256
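/*
 * Illustrative sketch (editor's addition, not part of the driver): a
 * SET_META configuration that passes the checks above - the mask is
 * non-zero and confined to bits the metadata register can carry. Guard
 * macro and values are assumptions for demonstration only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES /* Hypothetical guard, never defined. */
static const struct rte_flow_action_set_meta example_set_meta = {
        .data = 0x1234,
        .mask = 0xffff, /* Must be non-zero and within the register mask. */
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */
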
3257 /**
3258  * Validate SET_TAG action.
3259  *
3260  * @param[in] dev
3261  *   Pointer to the rte_eth_dev structure.
3262  * @param[in] action
3263  *   Pointer to the action structure.
3264  * @param[in] action_flags
3265  *   Holds the actions detected until now.
3266  * @param[in] attr
3267  *   Pointer to flow attributes
3268  * @param[out] error
3269  *   Pointer to error structure.
3270  *
3271  * @return
3272  *   0 on success, a negative errno value otherwise and rte_errno is set.
3273  */
3274 static int
3275 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3276                                 const struct rte_flow_action *action,
3277                                 uint64_t action_flags,
3278                                 const struct rte_flow_attr *attr,
3279                                 struct rte_flow_error *error)
3280 {
3281         const struct rte_flow_action_set_tag *conf;
3282         const uint64_t terminal_action_flags =
3283                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3284                 MLX5_FLOW_ACTION_RSS;
3285         int ret;
3286
3287         if (!mlx5_flow_ext_mreg_supported(dev))
3288                 return rte_flow_error_set(error, ENOTSUP,
3289                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3290                                           "extensive metadata register"
3291                                           " isn't supported");
3292         if (!(action->conf))
3293                 return rte_flow_error_set(error, EINVAL,
3294                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3295                                           "configuration cannot be null");
3296         conf = (const struct rte_flow_action_set_tag *)action->conf;
3297         if (!conf->mask)
3298                 return rte_flow_error_set(error, EINVAL,
3299                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3300                                           "zero mask doesn't have any effect");
3301         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3302         if (ret < 0)
3303                 return ret;
3304         if (!attr->transfer && attr->ingress &&
3305             (action_flags & terminal_action_flags))
3306                 return rte_flow_error_set(error, EINVAL,
3307                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3308                                           "set_tag has no effect"
3309                                           " with terminal actions");
3310         return 0;
3311 }
3312
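/*
 * Illustrative sketch (editor's addition, not part of the driver): a
 * SET_TAG configuration as validated above - a non-zero mask and a tag
 * index that mlx5_flow_get_reg_id() can map to a register. Guard macro
 * and values are assumptions for demonstration only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES /* Hypothetical guard, never defined. */
static const struct rte_flow_action_set_tag example_set_tag = {
        .data = 0xbeef,
        .mask = 0xffff, /* A zero mask would have no effect. */
        .index = 0,
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */
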
3313 /**
3314  * Check if action counter is shared by either old or new mechanism.
3315  *
3316  * @param[in] action
3317  *   Pointer to the action structure.
3318  *
3319  * @return
3320  *   True when counter is shared, false otherwise.
3321  */
3322 static inline bool
3323 is_shared_action_count(const struct rte_flow_action *action)
3324 {
3325         const struct rte_flow_action_count *count =
3326                         (const struct rte_flow_action_count *)action->conf;
3327
3328         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3329                 return true;
3330         return !!(count && count->shared);
3331 }
3332
3333 /**
3334  * Validate count action.
3335  *
3336  * @param[in] dev
3337  *   Pointer to rte_eth_dev structure.
3338  * @param[in] shared
3339  *   Indicator if action is shared.
3340  * @param[in] action_flags
3341  *   Holds the actions detected until now.
3342  * @param[out] error
3343  *   Pointer to error structure.
3344  *
3345  * @return
3346  *   0 on success, a negative errno value otherwise and rte_errno is set.
3347  */
3348 static int
3349 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3350                               uint64_t action_flags,
3351                               struct rte_flow_error *error)
3352 {
3353         struct mlx5_priv *priv = dev->data->dev_private;
3354
3355         if (!priv->config.devx)
3356                 goto notsup_err;
3357         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3358                 return rte_flow_error_set(error, EINVAL,
3359                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3360                                           "duplicate count actions set");
3361         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3362             !priv->sh->flow_hit_aso_en)
3363                 return rte_flow_error_set(error, EINVAL,
3364                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3365                                           "old age and shared count combination is not supported");
3366 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3367         return 0;
3368 #endif
3369 notsup_err:
3370         return rte_flow_error_set
3371                       (error, ENOTSUP,
3372                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3373                        NULL,
3374                        "count action not supported");
3375 }
3376
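/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * legacy way to request a shared counter, as tested by
 * is_shared_action_count() above. Guard macro and values are assumptions
 * for demonstration only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES /* Hypothetical guard, never defined. */
static const struct rte_flow_action_count example_shared_count = {
        .shared = 1, /* The counter may be referenced by several flows. */
        .id = 0,
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */
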
3377 /**
3378  * Validate the L2 encap action.
3379  *
3380  * @param[in] dev
3381  *   Pointer to the rte_eth_dev structure.
3382  * @param[in] action_flags
3383  *   Holds the actions detected until now.
3384  * @param[in] action
3385  *   Pointer to the action structure.
3386  * @param[in] attr
3387  *   Pointer to flow attributes.
3388  * @param[out] error
3389  *   Pointer to error structure.
3390  *
3391  * @return
3392  *   0 on success, a negative errno value otherwise and rte_errno is set.
3393  */
3394 static int
3395 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3396                                  uint64_t action_flags,
3397                                  const struct rte_flow_action *action,
3398                                  const struct rte_flow_attr *attr,
3399                                  struct rte_flow_error *error)
3400 {
3401         const struct mlx5_priv *priv = dev->data->dev_private;
3402
3403         if (!(action->conf))
3404                 return rte_flow_error_set(error, EINVAL,
3405                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3406                                           "configuration cannot be null");
3407         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3408                 return rte_flow_error_set(error, EINVAL,
3409                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3410                                           "can only have a single encap action "
3411                                           "in a flow");
3412         if (!attr->transfer && priv->representor)
3413                 return rte_flow_error_set(error, ENOTSUP,
3414                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3415                                           "encap action for VF representor "
3416                                           "not supported on NIC table");
3417         return 0;
3418 }
3419
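/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * shape of a VXLAN encap conf; the definition list must describe the full
 * outer headers, and real specs are omitted here for brevity. Guard macro
 * and names are assumptions for demonstration only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES /* Hypothetical guard, never defined. */
static struct rte_flow_item example_vxlan_encap_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_vxlan_encap example_vxlan_encap = {
        .definition = example_vxlan_encap_pattern,
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */
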
3420 /**
3421  * Validate a decap action.
3422  *
3423  * @param[in] dev
3424  *   Pointer to the rte_eth_dev structure.
3425  * @param[in] action_flags
3426  *   Holds the actions detected until now.
3427  * @param[in] action
3428  *   Pointer to the action structure.
3429  * @param[in] item_flags
3430  *   Holds the items detected.
3431  * @param[in] attr
3432  *   Pointer to flow attributes
3433  * @param[out] error
3434  *   Pointer to error structure.
3435  *
3436  * @return
3437  *   0 on success, a negative errno value otherwise and rte_errno is set.
3438  */
3439 static int
3440 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3441                               uint64_t action_flags,
3442                               const struct rte_flow_action *action,
3443                               const uint64_t item_flags,
3444                               const struct rte_flow_attr *attr,
3445                               struct rte_flow_error *error)
3446 {
3447         const struct mlx5_priv *priv = dev->data->dev_private;
3448
3449         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3450             !priv->config.decap_en)
3451                 return rte_flow_error_set(error, ENOTSUP,
3452                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3453                                           "decap is not enabled");
3454         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3455                 return rte_flow_error_set(error, ENOTSUP,
3456                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3457                                           action_flags &
3458                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3459                                           "have a single decap action" : "decap "
3460                                           "after encap is not supported");
3461         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3462                 return rte_flow_error_set(error, EINVAL,
3463                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3464                                           "can't have decap action after"
3465                                           " modify action");
3466         if (attr->egress)
3467                 return rte_flow_error_set(error, ENOTSUP,
3468                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3469                                           NULL,
3470                                           "decap action not supported for "
3471                                           "egress");
3472         if (!attr->transfer && priv->representor)
3473                 return rte_flow_error_set(error, ENOTSUP,
3474                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3475                                           "decap action for VF representor "
3476                                           "not supported on NIC table");
3477         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3478             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3479                 return rte_flow_error_set(error, ENOTSUP,
3480                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3481                                 "VXLAN item should be present for VXLAN decap");
3482         return 0;
3483 }
3484
3485 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3486
3487 /**
3488  * Validate the raw encap and decap actions.
3489  *
3490  * @param[in] dev
3491  *   Pointer to the rte_eth_dev structure.
3492  * @param[in] decap
3493  *   Pointer to the decap action.
3494  * @param[in] encap
3495  *   Pointer to the encap action.
3496  * @param[in] attr
3497  *   Pointer to flow attributes
3498  * @param[in/out] action_flags
3499  *   Holds the actions detected until now.
3500  * @param[out] actions_n
 *   Pointer to the detected actions counter.
3502  * @param[in] action
3503  *   Pointer to the action structure.
3504  * @param[in] item_flags
3505  *   Holds the items detected.
3506  * @param[out] error
3507  *   Pointer to error structure.
3508  *
3509  * @return
3510  *   0 on success, a negative errno value otherwise and rte_errno is set.
3511  */
3512 static int
3513 flow_dv_validate_action_raw_encap_decap
3514         (struct rte_eth_dev *dev,
3515          const struct rte_flow_action_raw_decap *decap,
3516          const struct rte_flow_action_raw_encap *encap,
3517          const struct rte_flow_attr *attr, uint64_t *action_flags,
3518          int *actions_n, const struct rte_flow_action *action,
3519          uint64_t item_flags, struct rte_flow_error *error)
3520 {
3521         const struct mlx5_priv *priv = dev->data->dev_private;
3522         int ret;
3523
3524         if (encap && (!encap->size || !encap->data))
3525                 return rte_flow_error_set(error, EINVAL,
3526                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3527                                           "raw encap data cannot be empty");
3528         if (decap && encap) {
3529                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3530                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3531                         /* L3 encap. */
3532                         decap = NULL;
3533                 else if (encap->size <=
3534                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3535                            decap->size >
3536                            MLX5_ENCAPSULATION_DECISION_SIZE)
3537                         /* L3 decap. */
3538                         encap = NULL;
3539                 else if (encap->size >
3540                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3541                            decap->size >
3542                            MLX5_ENCAPSULATION_DECISION_SIZE)
3543                         /* 2 L2 actions: encap and decap. */
3544                         ;
3545                 else
3546                         return rte_flow_error_set(error,
3547                                 ENOTSUP,
3548                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "combination of too-small raw "
                                "decap and too-small raw encap is "
                                "not supported");
3552         }
3553         if (decap) {
3554                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3555                                                     item_flags, attr, error);
3556                 if (ret < 0)
3557                         return ret;
3558                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3559                 ++(*actions_n);
3560         }
3561         if (encap) {
3562                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3563                         return rte_flow_error_set(error, ENOTSUP,
3564                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3565                                                   NULL,
                                                  "raw encap size is too small");
3567                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3568                         return rte_flow_error_set(error, EINVAL,
3569                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3570                                                   NULL,
3571                                                   "more than one encap action");
3572                 if (!attr->transfer && priv->representor)
3573                         return rte_flow_error_set
3574                                         (error, ENOTSUP,
3575                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3576                                          "encap action for VF representor "
3577                                          "not supported on NIC table");
3578                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3579                 ++(*actions_n);
3580         }
3581         return 0;
3582 }
3583
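/*
 * Illustrative sketch (editor's addition, not part of the driver): an L3
 * encap expressed as a small raw decap (at most the
 * MLX5_ENCAPSULATION_DECISION_SIZE L2 part) followed by a larger raw
 * encap carrying the new headers. Guard macro, buffer and sizes are
 * assumptions for demonstration only.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES /* Hypothetical guard, never defined. */
static uint8_t example_encap_buf[64]; /* Filled by the application. */
static const struct rte_flow_action_raw_decap example_raw_decap = {
        .size = RTE_ETHER_HDR_LEN, /* <= MLX5_ENCAPSULATION_DECISION_SIZE. */
};
static const struct rte_flow_action_raw_encap example_raw_encap = {
        .data = example_encap_buf,
        .size = sizeof(example_encap_buf), /* Above the decision size. */
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */
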
/**
3585  * Validate the ASO CT action.
3586  *
3587  * @param[in] dev
3588  *   Pointer to the rte_eth_dev structure.
3589  * @param[in] action_flags
3590  *   Holds the actions detected until now.
3591  * @param[in] item_flags
3592  *   The items found in this flow rule.
3593  * @param[in] attr
3594  *   Pointer to flow attributes.
3595  * @param[out] error
3596  *   Pointer to error structure.
3597  *
3598  * @return
3599  *   0 on success, a negative errno value otherwise and rte_errno is set.
3600  */
3601 static int
3602 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3603                                uint64_t action_flags,
3604                                uint64_t item_flags,
3605                                const struct rte_flow_attr *attr,
3606                                struct rte_flow_error *error)
3607 {
3608         RTE_SET_USED(dev);
3609
3610         if (attr->group == 0 && !attr->transfer)
3611                 return rte_flow_error_set(error, ENOTSUP,
3612                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3613                                           NULL,
                                          "Only supported on non-root tables");
3615         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3616                 return rte_flow_error_set(error, ENOTSUP,
3617                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3618                                           "CT cannot follow a fate action");
3619         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3620             (action_flags & MLX5_FLOW_ACTION_AGE))
3621                 return rte_flow_error_set(error, EINVAL,
3622                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3623                                           "Only one ASO action is supported");
3624         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3625                 return rte_flow_error_set(error, EINVAL,
3626                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3627                                           "Encap cannot exist before CT");
3628         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3629                 return rte_flow_error_set(error, EINVAL,
3630                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3631                                           "Not an outer TCP packet");
3632         return 0;
3633 }
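/*
 * A sketch of a rule shape accepted by the checks above (illustrative
 * only): the rule must live in a non-root group (or be a transfer
 * rule), match outer TCP, and place the CT action after any encap and
 * before the fate action:
 *
 *     attr:    group 1
 *     pattern: ETH / IPV4 / TCP / END
 *     actions: CONNTRACK / JUMP (fate) / END
 *
 * Combining CT with METER or AGE in the same rule is rejected, since
 * only one ASO action per rule is supported.
 */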
3634
3635 int
3636 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3637                              struct mlx5_list_entry *entry, void *cb_ctx)
3638 {
3639         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3640         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3641         struct mlx5_flow_dv_encap_decap_resource *resource;
3642
3643         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3644                                 entry);
3645         if (resource->reformat_type == ctx_resource->reformat_type &&
3646             resource->ft_type == ctx_resource->ft_type &&
3647             resource->flags == ctx_resource->flags &&
3648             resource->size == ctx_resource->size &&
3649             !memcmp((const void *)resource->buf,
3650                     (const void *)ctx_resource->buf,
3651                     resource->size))
3652                 return 0;
3653         return -1;
3654 }
3655
3656 struct mlx5_list_entry *
3657 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3658 {
3659         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3660         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3661         struct mlx5dv_dr_domain *domain;
3662         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3663         struct mlx5_flow_dv_encap_decap_resource *resource;
3664         uint32_t idx;
3665         int ret;
3666
3667         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3668                 domain = sh->fdb_domain;
3669         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3670                 domain = sh->rx_domain;
3671         else
3672                 domain = sh->tx_domain;
3673         /* Register new encap/decap resource. */
3674         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3675         if (!resource) {
3676                 rte_flow_error_set(ctx->error, ENOMEM,
3677                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3678                                    "cannot allocate resource memory");
3679                 return NULL;
3680         }
3681         *resource = *ctx_resource;
3682         resource->idx = idx;
3683         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
3684                                                               resource,
3685                                                              &resource->action);
3686         if (ret) {
3687                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3688                 rte_flow_error_set(ctx->error, ENOMEM,
3689                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3690                                    NULL, "cannot create action");
3691                 return NULL;
3692         }
3693
3694         return &resource->entry;
3695 }
3696
3697 struct mlx5_list_entry *
3698 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3699                              void *cb_ctx)
3700 {
3701         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3702         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3703         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3704         uint32_t idx;
3705
3706         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3707                                            &idx);
3708         if (!cache_resource) {
3709                 rte_flow_error_set(ctx->error, ENOMEM,
3710                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3711                                    "cannot allocate resource memory");
3712                 return NULL;
3713         }
3714         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3715         cache_resource->idx = idx;
3716         return &cache_resource->entry;
3717 }
3718
3719 void
3720 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3721 {
3722         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3723         struct mlx5_flow_dv_encap_decap_resource *res =
3724                                        container_of(entry, typeof(*res), entry);
3725
3726         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3727 }
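/*
 * The match/create/clone/clone_free callbacks above plug into the
 * mlx5_list/mlx5_hlist caches: create builds the DR action once, match
 * deduplicates concurrent registrations, and clone/clone_free appear to
 * manage the per-lcore copies of a cached entry (a summary of the cache
 * contract as used here, not additional behavior).
 */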
3728
3729 /**
3730  * Find existing encap/decap resource or create and register a new one.
3731  *
3732  * @param[in, out] dev
3733  *   Pointer to rte_eth_dev structure.
3734  * @param[in, out] resource
3735  *   Pointer to encap/decap resource.
3736  * @param[in, out] dev_flow
3737  *   Pointer to the dev_flow.
3738  * @param[out] error
3739  *   Pointer to error structure.
3740  *
3741  * @return
3742  *   0 on success, a negative errno value otherwise and rte_errno is set.
3743  */
3744 static int
3745 flow_dv_encap_decap_resource_register
3746                         (struct rte_eth_dev *dev,
3747                          struct mlx5_flow_dv_encap_decap_resource *resource,
3748                          struct mlx5_flow *dev_flow,
3749                          struct rte_flow_error *error)
3750 {
3751         struct mlx5_priv *priv = dev->data->dev_private;
3752         struct mlx5_dev_ctx_shared *sh = priv->sh;
3753         struct mlx5_list_entry *entry;
3754         union {
3755                 struct {
3756                         uint32_t ft_type:8;
3757                         uint32_t refmt_type:8;
3758                         /*
3759                          * Header reformat actions can be shared between
3760                          * non-root tables. One bit to indicate whether the
3761                          * table is non-root; set when the flow group is not 0.
3762                          */
3763                         uint32_t is_root:1;
3764                         uint32_t reserve:15;
3765                 };
3766                 uint32_t v32;
3767         } encap_decap_key = {
3768                 {
3769                         .ft_type = resource->ft_type,
3770                         .refmt_type = resource->reformat_type,
3771                         .is_root = !!dev_flow->dv.group,
3772                         .reserve = 0,
3773                 }
3774         };
3775         struct mlx5_flow_cb_ctx ctx = {
3776                 .error = error,
3777                 .data = resource,
3778         };
3779         struct mlx5_hlist *encaps_decaps;
3780         uint64_t key64;
3781
3782         encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
3783                                 "encaps_decaps",
3784                                 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
3785                                 true, true, sh,
3786                                 flow_dv_encap_decap_create_cb,
3787                                 flow_dv_encap_decap_match_cb,
3788                                 flow_dv_encap_decap_remove_cb,
3789                                 flow_dv_encap_decap_clone_cb,
3790                                 flow_dv_encap_decap_clone_free_cb);
3791         if (unlikely(!encaps_decaps))
3792                 return -rte_errno;
3793         resource->flags = dev_flow->dv.group ? 0 : 1;
3794         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3795                                  sizeof(encap_decap_key.v32), 0);
3796         if (resource->reformat_type !=
3797             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3798             resource->size)
3799                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3800         entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
3801         if (!entry)
3802                 return -rte_errno;
3803         resource = container_of(entry, typeof(*resource), entry);
3804         dev_flow->dv.encap_decap = resource;
3805         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3806         return 0;
3807 }
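/*
 * A sketch of how the cache key above is composed (illustrative only;
 * reformat_carries_data is shorthand for the non-decap, non-empty
 * condition in the code): the packed type descriptor is checksummed
 * first and, when the reformat carries data, the raw buffer is folded
 * into the same running checksum:
 *
 *     key64 = __rte_raw_cksum(&encap_decap_key.v32,
 *                             sizeof(encap_decap_key.v32), 0);
 *     if (reformat_carries_data)
 *             key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
 *
 * The key only selects a hash bucket; flow_dv_encap_decap_match_cb()
 * still compares type, flags, size and buffer, so checksum collisions
 * cannot alias two different reformat actions.
 */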
3808
3809 /**
3810  * Find existing table jump resource or create and register a new one.
3811  *
3812  * @param[in, out] dev
3813  *   Pointer to rte_eth_dev structure.
3814  * @param[in, out] tbl
3815  *   Pointer to flow table resource.
3816  * @param[in, out] dev_flow
3817  *   Pointer to the dev_flow.
3818  * @param[out] error
3819  *   Pointer to error structure.
3820  *
3821  * @return
3822  *   0 on success, a negative errno value otherwise and rte_errno is set.
3823  */
3824 static int
3825 flow_dv_jump_tbl_resource_register
3826                         (struct rte_eth_dev *dev __rte_unused,
3827                          struct mlx5_flow_tbl_resource *tbl,
3828                          struct mlx5_flow *dev_flow,
3829                          struct rte_flow_error *error __rte_unused)
3830 {
3831         struct mlx5_flow_tbl_data_entry *tbl_data =
3832                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3833
3834         MLX5_ASSERT(tbl);
3835         MLX5_ASSERT(tbl_data->jump.action);
3836         dev_flow->handle->rix_jump = tbl_data->idx;
3837         dev_flow->dv.jump = &tbl_data->jump;
3838         return 0;
3839 }
3840
3841 int
3842 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3843                          struct mlx5_list_entry *entry, void *cb_ctx)
3844 {
3845         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3846         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3847         struct mlx5_flow_dv_port_id_action_resource *res =
3848                                        container_of(entry, typeof(*res), entry);
3849
3850         return ref->port_id != res->port_id;
3851 }
3852
3853 struct mlx5_list_entry *
3854 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3855 {
3856         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3857         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3858         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3859         struct mlx5_flow_dv_port_id_action_resource *resource;
3860         uint32_t idx;
3861         int ret;
3862
3863         /* Register new port id action resource. */
3864         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3865         if (!resource) {
3866                 rte_flow_error_set(ctx->error, ENOMEM,
3867                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3868                                    "cannot allocate port_id action memory");
3869                 return NULL;
3870         }
3871         *resource = *ref;
3872         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3873                                                         ref->port_id,
3874                                                         &resource->action);
3875         if (ret) {
3876                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3877                 rte_flow_error_set(ctx->error, ENOMEM,
3878                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3879                                    "cannot create action");
3880                 return NULL;
3881         }
3882         resource->idx = idx;
3883         return &resource->entry;
3884 }
3885
3886 struct mlx5_list_entry *
3887 flow_dv_port_id_clone_cb(void *tool_ctx,
3888                          struct mlx5_list_entry *entry __rte_unused,
3889                          void *cb_ctx)
3890 {
3891         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3892         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3893         struct mlx5_flow_dv_port_id_action_resource *resource;
3894         uint32_t idx;
3895
3896         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3897         if (!resource) {
3898                 rte_flow_error_set(ctx->error, ENOMEM,
3899                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3900                                    "cannot allocate port_id action memory");
3901                 return NULL;
3902         }
3903         memcpy(resource, entry, sizeof(*resource));
3904         resource->idx = idx;
3905         return &resource->entry;
3906 }
3907
3908 void
3909 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3910 {
3911         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3912         struct mlx5_flow_dv_port_id_action_resource *resource =
3913                                   container_of(entry, typeof(*resource), entry);
3914
3915         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3916 }
3917
3918 /**
3919  * Find existing table port ID resource or create and register a new one.
3920  *
3921  * @param[in, out] dev
3922  *   Pointer to rte_eth_dev structure.
3923  * @param[in, out] ref
3924  *   Pointer to port ID action resource reference.
3925  * @param[in, out] dev_flow
3926  *   Pointer to the dev_flow.
3927  * @param[out] error
3928  *   Pointer to error structure.
3929  *
3930  * @return
3931  *   0 on success, a negative errno value otherwise and rte_errno is set.
3932  */
3933 static int
3934 flow_dv_port_id_action_resource_register
3935                         (struct rte_eth_dev *dev,
3936                          struct mlx5_flow_dv_port_id_action_resource *ref,
3937                          struct mlx5_flow *dev_flow,
3938                          struct rte_flow_error *error)
3939 {
3940         struct mlx5_priv *priv = dev->data->dev_private;
3941         struct mlx5_list_entry *entry;
3942         struct mlx5_flow_dv_port_id_action_resource *resource;
3943         struct mlx5_flow_cb_ctx ctx = {
3944                 .error = error,
3945                 .data = ref,
3946         };
3947
3948         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3949         if (!entry)
3950                 return -rte_errno;
3951         resource = container_of(entry, typeof(*resource), entry);
3952         dev_flow->dv.port_id_action = resource;
3953         dev_flow->handle->rix_port_id_action = resource->idx;
3954         return 0;
3955 }
3956
3957 int
3958 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3959                            struct mlx5_list_entry *entry, void *cb_ctx)
3960 {
3961         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3962         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3963         struct mlx5_flow_dv_push_vlan_action_resource *res =
3964                                        container_of(entry, typeof(*res), entry);
3965
3966         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3967 }
3968
3969 struct mlx5_list_entry *
3970 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3971 {
3972         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3973         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3974         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3975         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3976         struct mlx5dv_dr_domain *domain;
3977         uint32_t idx;
3978         int ret;
3979
3980         /* Register new push VLAN action resource. */
3981         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3982         if (!resource) {
3983                 rte_flow_error_set(ctx->error, ENOMEM,
3984                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3985                                    "cannot allocate push_vlan action memory");
3986                 return NULL;
3987         }
3988         *resource = *ref;
3989         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3990                 domain = sh->fdb_domain;
3991         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3992                 domain = sh->rx_domain;
3993         else
3994                 domain = sh->tx_domain;
3995         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3996                                                         &resource->action);
3997         if (ret) {
3998                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3999                 rte_flow_error_set(ctx->error, ENOMEM,
4000                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4001                                    "cannot create push vlan action");
4002                 return NULL;
4003         }
4004         resource->idx = idx;
4005         return &resource->entry;
4006 }
4007
4008 struct mlx5_list_entry *
4009 flow_dv_push_vlan_clone_cb(void *tool_ctx,
4010                            struct mlx5_list_entry *entry __rte_unused,
4011                            void *cb_ctx)
4012 {
4013         struct mlx5_dev_ctx_shared *sh = tool_ctx;
4014         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4015         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4016         uint32_t idx;
4017
4018         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
4019         if (!resource) {
4020                 rte_flow_error_set(ctx->error, ENOMEM,
4021                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4022                                    "cannot allocate push_vlan action memory");
4023                 return NULL;
4024         }
4025         memcpy(resource, entry, sizeof(*resource));
4026         resource->idx = idx;
4027         return &resource->entry;
4028 }
4029
4030 void
4031 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
4032 {
4033         struct mlx5_dev_ctx_shared *sh = tool_ctx;
4034         struct mlx5_flow_dv_push_vlan_action_resource *resource =
4035                                   container_of(entry, typeof(*resource), entry);
4036
4037         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
4038 }
4039
4040 /**
4041  * Find existing push vlan resource or create and register a new one.
4042  *
4043  * @param[in, out] dev
4044  *   Pointer to rte_eth_dev structure.
4045  * @param[in, out] ref
4046  *   Pointer to push VLAN action resource reference.
4047  * @param[in, out] dev_flow
4048  *   Pointer to the dev_flow.
4049  * @param[out] error
4050  *   Pointer to error structure.
4051  *
4052  * @return
4053  *   0 on success, a negative errno value otherwise and rte_errno is set.
4054  */
4055 static int
4056 flow_dv_push_vlan_action_resource_register
4057                        (struct rte_eth_dev *dev,
4058                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
4059                         struct mlx5_flow *dev_flow,
4060                         struct rte_flow_error *error)
4061 {
4062         struct mlx5_priv *priv = dev->data->dev_private;
4063         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4064         struct mlx5_list_entry *entry;
4065         struct mlx5_flow_cb_ctx ctx = {
4066                 .error = error,
4067                 .data = ref,
4068         };
4069
4070         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4071         if (!entry)
4072                 return -rte_errno;
4073         resource = container_of(entry, typeof(*resource), entry);
4074
4075         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4076         dev_flow->dv.push_vlan_res = resource;
4077         return 0;
4078 }
4079
4080 /**
4081  * Get the header size of a specific rte_flow_item_type.
4082  *
4083  * @param[in] item_type
4084  *   Tested rte_flow_item_type.
4085  *
4086  * @return
4087  *   Size of the item type header, 0 if void or irrelevant.
4088  */
4089 static size_t
4090 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4091 {
4092         size_t retval;
4093
4094         switch (item_type) {
4095         case RTE_FLOW_ITEM_TYPE_ETH:
4096                 retval = sizeof(struct rte_ether_hdr);
4097                 break;
4098         case RTE_FLOW_ITEM_TYPE_VLAN:
4099                 retval = sizeof(struct rte_vlan_hdr);
4100                 break;
4101         case RTE_FLOW_ITEM_TYPE_IPV4:
4102                 retval = sizeof(struct rte_ipv4_hdr);
4103                 break;
4104         case RTE_FLOW_ITEM_TYPE_IPV6:
4105                 retval = sizeof(struct rte_ipv6_hdr);
4106                 break;
4107         case RTE_FLOW_ITEM_TYPE_UDP:
4108                 retval = sizeof(struct rte_udp_hdr);
4109                 break;
4110         case RTE_FLOW_ITEM_TYPE_TCP:
4111                 retval = sizeof(struct rte_tcp_hdr);
4112                 break;
4113         case RTE_FLOW_ITEM_TYPE_VXLAN:
4114         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4115                 retval = sizeof(struct rte_vxlan_hdr);
4116                 break;
4117         case RTE_FLOW_ITEM_TYPE_GRE:
4118         case RTE_FLOW_ITEM_TYPE_NVGRE:
4119                 retval = sizeof(struct rte_gre_hdr);
4120                 break;
4121         case RTE_FLOW_ITEM_TYPE_MPLS:
4122                 retval = sizeof(struct rte_mpls_hdr);
4123                 break;
4124         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4125         default:
4126                 retval = 0;
4127                 break;
4128         }
4129         return retval;
4130 }
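/*
 * Usage sketch (illustrative only): the helper above lets the encap
 * converter accumulate the exact wire length of a header stack, e.g.
 * for ETH/IPV4/UDP/VXLAN:
 *
 *     size_t len = flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_ETH) +
 *                  flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_IPV4) +
 *                  flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_UDP) +
 *                  flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_VXLAN);
 *     // 14 + 20 + 8 + 8 = 50 bytes, which must fit in MLX5_ENCAP_MAX_LEN
 */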
4131
4132 #define MLX5_ENCAP_IPV4_VERSION         0x40
4133 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4134 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4135 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4136 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4137 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4138 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4139
4140 /**
4141  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
4142  *
4143  * @param[in] items
4144  *   Pointer to rte_flow_item objects list.
4145  * @param[out] buf
4146  *   Pointer to the output buffer.
4147  * @param[out] size
4148  *   Pointer to the output buffer size.
4149  * @param[out] error
4150  *   Pointer to the error structure.
4151  *
4152  * @return
4153  *   0 on success, a negative errno value otherwise and rte_errno is set.
4154  */
4155 static int
4156 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4157                            size_t *size, struct rte_flow_error *error)
4158 {
4159         struct rte_ether_hdr *eth = NULL;
4160         struct rte_vlan_hdr *vlan = NULL;
4161         struct rte_ipv4_hdr *ipv4 = NULL;
4162         struct rte_ipv6_hdr *ipv6 = NULL;
4163         struct rte_udp_hdr *udp = NULL;
4164         struct rte_vxlan_hdr *vxlan = NULL;
4165         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4166         struct rte_gre_hdr *gre = NULL;
4167         size_t len;
4168         size_t temp_size = 0;
4169
4170         if (!items)
4171                 return rte_flow_error_set(error, EINVAL,
4172                                           RTE_FLOW_ERROR_TYPE_ACTION,
4173                                           NULL, "invalid empty data");
4174         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4175                 len = flow_dv_get_item_hdr_len(items->type);
4176                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4177                         return rte_flow_error_set(error, EINVAL,
4178                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4179                                                   (void *)items->type,
4180                                                   "items total size is too big"
4181                                                   " for encap action");
4182                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4183                 switch (items->type) {
4184                 case RTE_FLOW_ITEM_TYPE_ETH:
4185                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4186                         break;
4187                 case RTE_FLOW_ITEM_TYPE_VLAN:
4188                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4189                         if (!eth)
4190                                 return rte_flow_error_set(error, EINVAL,
4191                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4192                                                 (void *)items->type,
4193                                                 "eth header not found");
4194                         if (!eth->ether_type)
4195                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4196                         break;
4197                 case RTE_FLOW_ITEM_TYPE_IPV4:
4198                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4199                         if (!vlan && !eth)
4200                                 return rte_flow_error_set(error, EINVAL,
4201                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4202                                                 (void *)items->type,
4203                                                 "neither eth nor vlan"
4204                                                 " header found");
4205                         if (vlan && !vlan->eth_proto)
4206                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4207                         else if (eth && !eth->ether_type)
4208                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4209                         if (!ipv4->version_ihl)
4210                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4211                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4212                         if (!ipv4->time_to_live)
4213                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4214                         break;
4215                 case RTE_FLOW_ITEM_TYPE_IPV6:
4216                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4217                         if (!vlan && !eth)
4218                                 return rte_flow_error_set(error, EINVAL,
4219                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4220                                                 (void *)items->type,
4221                                                 "neither eth nor vlan"
4222                                                 " header found");
4223                         if (vlan && !vlan->eth_proto)
4224                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4225                         else if (eth && !eth->ether_type)
4226                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4227                         if (!ipv6->vtc_flow)
4228                                 ipv6->vtc_flow =
4229                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4230                         if (!ipv6->hop_limits)
4231                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4232                         break;
4233                 case RTE_FLOW_ITEM_TYPE_UDP:
4234                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4235                         if (!ipv4 && !ipv6)
4236                                 return rte_flow_error_set(error, EINVAL,
4237                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4238                                                 (void *)items->type,
4239                                                 "ip header not found");
4240                         if (ipv4 && !ipv4->next_proto_id)
4241                                 ipv4->next_proto_id = IPPROTO_UDP;
4242                         else if (ipv6 && !ipv6->proto)
4243                                 ipv6->proto = IPPROTO_UDP;
4244                         break;
4245                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4246                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4247                         if (!udp)
4248                                 return rte_flow_error_set(error, EINVAL,
4249                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4250                                                 (void *)items->type,
4251                                                 "udp header not found");
4252                         if (!udp->dst_port)
4253                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4254                         if (!vxlan->vx_flags)
4255                                 vxlan->vx_flags =
4256                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4257                         break;
4258                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4259                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4260                         if (!udp)
4261                                 return rte_flow_error_set(error, EINVAL,
4262                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4263                                                 (void *)items->type,
4264                                                 "udp header not found");
4265                         if (!vxlan_gpe->proto)
4266                                 return rte_flow_error_set(error, EINVAL,
4267                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4268                                                 (void *)items->type,
4269                                                 "next protocol not found");
4270                         if (!udp->dst_port)
4271                                 udp->dst_port =
4272                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4273                         if (!vxlan_gpe->vx_flags)
4274                                 vxlan_gpe->vx_flags =
4275                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4276                         break;
4277                 case RTE_FLOW_ITEM_TYPE_GRE:
4278                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4279                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4280                         if (!gre->proto)
4281                                 return rte_flow_error_set(error, EINVAL,
4282                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4283                                                 (void *)items->type,
4284                                                 "next protocol not found");
4285                         if (!ipv4 && !ipv6)
4286                                 return rte_flow_error_set(error, EINVAL,
4287                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4288                                                 (void *)items->type,
4289                                                 "ip header not found");
4290                         if (ipv4 && !ipv4->next_proto_id)
4291                                 ipv4->next_proto_id = IPPROTO_GRE;
4292                         else if (ipv6 && !ipv6->proto)
4293                                 ipv6->proto = IPPROTO_GRE;
4294                         break;
4295                 case RTE_FLOW_ITEM_TYPE_VOID:
4296                         break;
4297                 default:
4298                         return rte_flow_error_set(error, EINVAL,
4299                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4300                                                   (void *)items->type,
4301                                                   "unsupported item type");
4302                         break;
4303                 }
4304                 temp_size += len;
4305         }
4306         *size = temp_size;
4307         return 0;
4308 }
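/*
 * A minimal VXLAN encap definition as consumed above (illustrative
 * only; addresses and the VNI are placeholders). Protocol fields left
 * at zero are filled in with defaults: EtherType, IPv4 version/IHL and
 * TTL, UDP destination port 4789 and the VXLAN flags:
 *
 *     struct rte_flow_item_eth eth = { ... MAC addresses ... };
 *     struct rte_flow_item_ipv4 ipv4 = { ... IP addresses ... };
 *     struct rte_flow_item_udp udp = { 0 };
 *     struct rte_flow_item_vxlan vxlan = { ... VNI ... };
 *     struct rte_flow_item items[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *         { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
 *         { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *         { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     flow_dv_convert_encap_data(items, buf, &size, error);
 */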
4309
4310 static int
4311 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4312 {
4313         struct rte_ether_hdr *eth = NULL;
4314         struct rte_vlan_hdr *vlan = NULL;
4315         struct rte_ipv6_hdr *ipv6 = NULL;
4316         struct rte_udp_hdr *udp = NULL;
4317         char *next_hdr;
4318         uint16_t proto;
4319
4320         eth = (struct rte_ether_hdr *)data;
4321         next_hdr = (char *)(eth + 1);
4322         proto = RTE_BE16(eth->ether_type);
4323
4324         /* VLAN skipping */
4325         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4326                 vlan = (struct rte_vlan_hdr *)next_hdr;
4327                 proto = RTE_BE16(vlan->eth_proto);
4328                 next_hdr += sizeof(struct rte_vlan_hdr);
4329         }
4330
4331         /* HW calculates the IPv4 checksum, no need to proceed. */
4332         if (proto == RTE_ETHER_TYPE_IPV4)
4333                 return 0;
4334
4335         /* Non-IPv4/IPv6 header, not supported. */
4336         if (proto != RTE_ETHER_TYPE_IPV6) {
4337                 return rte_flow_error_set(error, ENOTSUP,
4338                                           RTE_FLOW_ERROR_TYPE_ACTION,
4339                                           NULL, "Cannot offload non-IPv4/IPv6");
4340         }
4341
4342         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4343
4344         /* Ignore non-UDP packets. */
4345         if (ipv6->proto != IPPROTO_UDP)
4346                 return 0;
4347
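        /*
         * RFC 6935 permits a zero UDP checksum for tunnel protocols
         * over IPv6, so clear it rather than computing it in software.
         */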
4348         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4349         udp->dgram_cksum = 0;
4350
4351         return 0;
4352 }
4353
4354 /**
4355  * Convert L2 encap action to DV specification.
4356  *
4357  * @param[in] dev
4358  *   Pointer to rte_eth_dev structure.
4359  * @param[in] action
4360  *   Pointer to action structure.
4361  * @param[in, out] dev_flow
4362  *   Pointer to the mlx5_flow.
4363  * @param[in] transfer
4364  *   Mark if the flow is E-Switch flow.
4365  * @param[out] error
4366  *   Pointer to the error structure.
4367  *
4368  * @return
4369  *   0 on success, a negative errno value otherwise and rte_errno is set.
4370  */
4371 static int
4372 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4373                                const struct rte_flow_action *action,
4374                                struct mlx5_flow *dev_flow,
4375                                uint8_t transfer,
4376                                struct rte_flow_error *error)
4377 {
4378         const struct rte_flow_item *encap_data;
4379         const struct rte_flow_action_raw_encap *raw_encap_data;
4380         struct mlx5_flow_dv_encap_decap_resource res = {
4381                 .reformat_type =
4382                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4383                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4384                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4385         };
4386
4387         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4388                 raw_encap_data =
4389                         (const struct rte_flow_action_raw_encap *)action->conf;
4390                 res.size = raw_encap_data->size;
4391                 memcpy(res.buf, raw_encap_data->data, res.size);
4392         } else {
4393                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4394                         encap_data =
4395                                 ((const struct rte_flow_action_vxlan_encap *)
4396                                                 action->conf)->definition;
4397                 else
4398                         encap_data =
4399                                 ((const struct rte_flow_action_nvgre_encap *)
4400                                                 action->conf)->definition;
4401                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4402                                                &res.size, error))
4403                         return -rte_errno;
4404         }
4405         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4406                 return -rte_errno;
4407         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4408                 return rte_flow_error_set(error, EINVAL,
4409                                           RTE_FLOW_ERROR_TYPE_ACTION,
4410                                           NULL, "can't create L2 encap action");
4411         return 0;
4412 }
4413
4414 /**
4415  * Convert L2 decap action to DV specification.
4416  *
4417  * @param[in] dev
4418  *   Pointer to rte_eth_dev structure.
4419  * @param[in, out] dev_flow
4420  *   Pointer to the mlx5_flow.
4421  * @param[in] transfer
4422  *   Mark if the flow is E-Switch flow.
4423  * @param[out] error
4424  *   Pointer to the error structure.
4425  *
4426  * @return
4427  *   0 on success, a negative errno value otherwise and rte_errno is set.
4428  */
4429 static int
4430 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4431                                struct mlx5_flow *dev_flow,
4432                                uint8_t transfer,
4433                                struct rte_flow_error *error)
4434 {
4435         struct mlx5_flow_dv_encap_decap_resource res = {
4436                 .size = 0,
4437                 .reformat_type =
4438                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4439                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4440                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4441         };
4442
4443         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4444                 return rte_flow_error_set(error, EINVAL,
4445                                           RTE_FLOW_ERROR_TYPE_ACTION,
4446                                           NULL, "can't create L2 decap action");
4447         return 0;
4448 }
4449
4450 /**
4451  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4452  *
4453  * @param[in] dev
4454  *   Pointer to rte_eth_dev structure.
4455  * @param[in] action
4456  *   Pointer to action structure.
4457  * @param[in, out] dev_flow
4458  *   Pointer to the mlx5_flow.
4459  * @param[in] attr
4460  *   Pointer to the flow attributes.
4461  * @param[out] error
4462  *   Pointer to the error structure.
4463  *
4464  * @return
4465  *   0 on success, a negative errno value otherwise and rte_errno is set.
4466  */
4467 static int
4468 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4469                                 const struct rte_flow_action *action,
4470                                 struct mlx5_flow *dev_flow,
4471                                 const struct rte_flow_attr *attr,
4472                                 struct rte_flow_error *error)
4473 {
4474         const struct rte_flow_action_raw_encap *encap_data;
4475         struct mlx5_flow_dv_encap_decap_resource res;
4476
4477         memset(&res, 0, sizeof(res));
4478         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4479         res.size = encap_data->size;
4480         memcpy(res.buf, encap_data->data, res.size);
4481         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4482                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4483                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4484         if (attr->transfer)
4485                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4486         else
4487                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4488                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4489         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4490                 return rte_flow_error_set(error, EINVAL,
4491                                           RTE_FLOW_ERROR_TYPE_ACTION,
4492                                           NULL, "can't create encap action");
4493         return 0;
4494 }
4495
4496 /**
4497  * Create action push VLAN.
4498  *
4499  * @param[in] dev
4500  *   Pointer to rte_eth_dev structure.
4501  * @param[in] attr
4502  *   Pointer to the flow attributes.
4503  * @param[in] vlan
4504  *   Pointer to the vlan to push to the Ethernet header.
4505  * @param[in, out] dev_flow
4506  *   Pointer to the mlx5_flow.
4507  * @param[out] error
4508  *   Pointer to the error structure.
4509  *
4510  * @return
4511  *   0 on success, a negative errno value otherwise and rte_errno is set.
4512  */
4513 static int
4514 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4515                                 const struct rte_flow_attr *attr,
4516                                 const struct rte_vlan_hdr *vlan,
4517                                 struct mlx5_flow *dev_flow,
4518                                 struct rte_flow_error *error)
4519 {
4520         struct mlx5_flow_dv_push_vlan_action_resource res;
4521
4522         memset(&res, 0, sizeof(res));
4523         res.vlan_tag =
4524                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4525                                  vlan->vlan_tci);
4526         if (attr->transfer)
4527                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4528         else
4529                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4530                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4531         return flow_dv_push_vlan_action_resource_register
4532                                             (dev, &res, dev_flow, error);
4533 }
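/*
 * A sketch of the vlan_tag packing above (illustrative only; values are
 * examples and the TCI is assumed to be in CPU order): the 16-bit TPID
 * and 16-bit TCI are packed into one 32-bit big-endian word, e.g.
 * TPID 0x8100, PCP 3, VID 100:
 *
 *     uint16_t tci = (3 << 13) | 100;  // PCP=3, DEI=0, VID=100
 *     uint32_t tag = rte_cpu_to_be_32(((uint32_t)0x8100 << 16) | tci);
 */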
4534
4535 /**
4536  * Validate the modify-header actions.
4537  *
4538  * @param[in] action_flags
4539  *   Holds the actions detected until now.
4540  * @param[in] action
4541  *   Pointer to the modify action.
4542  * @param[out] error
4543  *   Pointer to error structure.
4544  *
4545  * @return
4546  *   0 on success, a negative errno value otherwise and rte_errno is set.
4547  */
4548 static int
4549 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4550                                    const struct rte_flow_action *action,
4551                                    struct rte_flow_error *error)
4552 {
4553         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4554                 return rte_flow_error_set(error, EINVAL,
4555                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4556                                           NULL, "action configuration not set");
4557         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4558                 return rte_flow_error_set(error, EINVAL,
4559                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4560                                           "can't have encap action before"
4561                                           " modify action");
4562         return 0;
4563 }
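/*
 * Every modify-header validator below follows the same shape
 * (illustrative summary): run the generic check above, then make sure
 * the pattern actually matched the header being rewritten. After a
 * decap the inner headers become the outermost ones, so the inner layer
 * bit is tested instead, e.g. for IPv4:
 *
 *     layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
 *             MLX5_FLOW_LAYER_INNER_L3_IPV4 :
 *             MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 *     if (!(item_flags & layer))
 *             return rte_flow_error_set(...);
 */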
4564
4565 /**
4566  * Validate the modify-header MAC address actions.
4567  *
4568  * @param[in] action_flags
4569  *   Holds the actions detected until now.
4570  * @param[in] action
4571  *   Pointer to the modify action.
4572  * @param[in] item_flags
4573  *   Holds the items detected.
4574  * @param[out] error
4575  *   Pointer to error structure.
4576  *
4577  * @return
4578  *   0 on success, a negative errno value otherwise and rte_errno is set.
4579  */
4580 static int
4581 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4582                                    const struct rte_flow_action *action,
4583                                    const uint64_t item_flags,
4584                                    struct rte_flow_error *error)
4585 {
4586         int ret = 0;
4587
4588         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4589         if (!ret) {
4590                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4591                         return rte_flow_error_set(error, EINVAL,
4592                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4593                                                   NULL,
4594                                                   "no L2 item in pattern");
4595         }
4596         return ret;
4597 }
4598
4599 /**
4600  * Validate the modify-header IPv4 address actions.
4601  *
4602  * @param[in] action_flags
4603  *   Holds the actions detected until now.
4604  * @param[in] action
4605  *   Pointer to the modify action.
4606  * @param[in] item_flags
4607  *   Holds the items detected.
4608  * @param[out] error
4609  *   Pointer to error structure.
4610  *
4611  * @return
4612  *   0 on success, a negative errno value otherwise and rte_errno is set.
4613  */
4614 static int
4615 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4616                                     const struct rte_flow_action *action,
4617                                     const uint64_t item_flags,
4618                                     struct rte_flow_error *error)
4619 {
4620         int ret = 0;
4621         uint64_t layer;
4622
4623         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4624         if (!ret) {
4625                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4626                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4627                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4628                 if (!(item_flags & layer))
4629                         return rte_flow_error_set(error, EINVAL,
4630                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4631                                                   NULL,
4632                                                   "no ipv4 item in pattern");
4633         }
4634         return ret;
4635 }
4636
4637 /**
4638  * Validate the modify-header IPv6 address actions.
4639  *
4640  * @param[in] action_flags
4641  *   Holds the actions detected until now.
4642  * @param[in] action
4643  *   Pointer to the modify action.
4644  * @param[in] item_flags
4645  *   Holds the items detected.
4646  * @param[out] error
4647  *   Pointer to error structure.
4648  *
4649  * @return
4650  *   0 on success, a negative errno value otherwise and rte_errno is set.
4651  */
4652 static int
4653 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4654                                     const struct rte_flow_action *action,
4655                                     const uint64_t item_flags,
4656                                     struct rte_flow_error *error)
4657 {
4658         int ret = 0;
4659         uint64_t layer;
4660
4661         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4662         if (!ret) {
4663                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4664                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4665                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4666                 if (!(item_flags & layer))
4667                         return rte_flow_error_set(error, EINVAL,
4668                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4669                                                   NULL,
4670                                                   "no ipv6 item in pattern");
4671         }
4672         return ret;
4673 }
4674
4675 /**
4676  * Validate the modify-header TP actions.
4677  *
4678  * @param[in] action_flags
4679  *   Holds the actions detected until now.
4680  * @param[in] action
4681  *   Pointer to the modify action.
4682  * @param[in] item_flags
4683  *   Holds the items detected.
4684  * @param[out] error
4685  *   Pointer to error structure.
4686  *
4687  * @return
4688  *   0 on success, a negative errno value otherwise and rte_errno is set.
4689  */
4690 static int
4691 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4692                                   const struct rte_flow_action *action,
4693                                   const uint64_t item_flags,
4694                                   struct rte_flow_error *error)
4695 {
4696         int ret = 0;
4697         uint64_t layer;
4698
4699         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4700         if (!ret) {
4701                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4702                                  MLX5_FLOW_LAYER_INNER_L4 :
4703                                  MLX5_FLOW_LAYER_OUTER_L4;
4704                 if (!(item_flags & layer))
4705                         return rte_flow_error_set(error, EINVAL,
4706                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4707                                                   NULL, "no transport layer "
4708                                                   "in pattern");
4709         }
4710         return ret;
4711 }
4712
4713 /**
4714  * Validate the modify-header actions of increment/decrement
4715  * TCP Sequence-number.
4716  *
4717  * @param[in] action_flags
4718  *   Holds the actions detected until now.
4719  * @param[in] action
4720  *   Pointer to the modify action.
4721  * @param[in] item_flags
4722  *   Holds the items detected.
4723  * @param[out] error
4724  *   Pointer to error structure.
4725  *
4726  * @return
4727  *   0 on success, a negative errno value otherwise and rte_errno is set.
4728  */
4729 static int
4730 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4731                                        const struct rte_flow_action *action,
4732                                        const uint64_t item_flags,
4733                                        struct rte_flow_error *error)
4734 {
4735         int ret = 0;
4736         uint64_t layer;
4737
4738         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4739         if (!ret) {
4740                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4741                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4742                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4743                 if (!(item_flags & layer))
4744                         return rte_flow_error_set(error, EINVAL,
4745                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4746                                                   NULL, "no TCP item in"
4747                                                   " pattern");
4748                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4749                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4750                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4751                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4752                         return rte_flow_error_set(error, EINVAL,
4753                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4754                                                   NULL,
4755                                                   "cannot decrease and increase"
4756                                                   " TCP sequence number"
4757                                                   " at the same time");
4758         }
4759         return ret;
4760 }
4761
4762 /**
4763  * Validate the modify-header actions of increment/decrement
4764  * TCP Acknowledgment number.
4765  *
4766  * @param[in] action_flags
4767  *   Holds the actions detected until now.
4768  * @param[in] action
4769  *   Pointer to the modify action.
4770  * @param[in] item_flags
4771  *   Holds the items detected.
4772  * @param[out] error
4773  *   Pointer to error structure.
4774  *
4775  * @return
4776  *   0 on success, a negative errno value otherwise and rte_errno is set.
4777  */
4778 static int
4779 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4780                                        const struct rte_flow_action *action,
4781                                        const uint64_t item_flags,
4782                                        struct rte_flow_error *error)
4783 {
4784         int ret = 0;
4785         uint64_t layer;
4786
4787         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4788         if (!ret) {
4789                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4790                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4791                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4792                 if (!(item_flags & layer))
4793                         return rte_flow_error_set(error, EINVAL,
4794                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4795                                                   NULL, "no TCP item in"
4796                                                   " pattern");
4797                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4798                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4799                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4800                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4801                         return rte_flow_error_set(error, EINVAL,
4802                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4803                                                   NULL,
4804                                                   "cannot decrease and increase"
4805                                                   " TCP acknowledgment number"
4806                                                   " at the same time");
4807         }
4808         return ret;
4809 }
4810
4811 /**
4812  * Validate the modify-header TTL actions.
4813  *
4814  * @param[in] action_flags
4815  *   Holds the actions detected until now.
4816  * @param[in] action
4817  *   Pointer to the modify action.
4818  * @param[in] item_flags
4819  *   Holds the items detected.
4820  * @param[out] error
4821  *   Pointer to error structure.
4822  *
4823  * @return
4824  *   0 on success, a negative errno value otherwise and rte_errno is set.
4825  */
4826 static int
4827 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4828                                    const struct rte_flow_action *action,
4829                                    const uint64_t item_flags,
4830                                    struct rte_flow_error *error)
4831 {
4832         int ret = 0;
4833         uint64_t layer;
4834
4835         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4836         if (!ret) {
4837                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4838                                  MLX5_FLOW_LAYER_INNER_L3 :
4839                                  MLX5_FLOW_LAYER_OUTER_L3;
4840                 if (!(item_flags & layer))
4841                         return rte_flow_error_set(error, EINVAL,
4842                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4843                                                   NULL,
4844                                                   "no IP protocol in pattern");
4845         }
4846         return ret;
4847 }
4848
4849 /**
4850  * Validate the generic modify field actions.
4851  * @param[in] dev
4852  *   Pointer to the rte_eth_dev structure.
4853  * @param[in] action_flags
4854  *   Holds the actions detected until now.
4855  * @param[in] action
4856  *   Pointer to the modify action.
4857  * @param[in] attr
4858  *   Pointer to the flow attributes.
4859  * @param[out] error
4860  *   Pointer to error structure.
4861  *
4862  * @return
4863  *   Number of header fields to modify (0 or more) on success,
4864  *   a negative errno value otherwise and rte_errno is set.
4865  */
4866 static int
4867 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4868                                    const uint64_t action_flags,
4869                                    const struct rte_flow_action *action,
4870                                    const struct rte_flow_attr *attr,
4871                                    struct rte_flow_error *error)
4872 {
4873         int ret = 0;
4874         struct mlx5_priv *priv = dev->data->dev_private;
4875         struct mlx5_dev_config *config = &priv->config;
4876         const struct rte_flow_action_modify_field *action_modify_field =
4877                 action->conf;
4878         uint32_t dst_width = mlx5_flow_item_field_width(config,
4879                                 action_modify_field->dst.field);
4880         uint32_t src_width = mlx5_flow_item_field_width(config,
4881                                 action_modify_field->src.field);
4882
4883         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4884         if (ret)
4885                 return ret;
4886
4887         if (action_modify_field->width == 0)
4888                 return rte_flow_error_set(error, EINVAL,
4889                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4890                                 "no bits are requested to be modified");
4891         else if (action_modify_field->width > dst_width ||
4892                  action_modify_field->width > src_width)
4893                 return rte_flow_error_set(error, EINVAL,
4894                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4895                                 "cannot modify more bits than"
4896                                 " the width of a field");
4897         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4898             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4899                 if ((action_modify_field->dst.offset +
4900                      action_modify_field->width > dst_width) ||
4901                     (action_modify_field->dst.offset % 32))
4902                         return rte_flow_error_set(error, EINVAL,
4903                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4904                                         "destination offset is too big"
4905                                         " or not aligned to 4 bytes");
4906                 if (action_modify_field->dst.level &&
4907                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4908                         return rte_flow_error_set(error, ENOTSUP,
4909                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4910                                         "inner header fields modification"
4911                                         " is not supported");
4912         }
4913         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4914             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4915                 if (!attr->transfer && !attr->group)
4916                         return rte_flow_error_set(error, ENOTSUP,
4917                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4918                                         "modify field action is not"
4919                                         " supported for group 0");
4920                 if ((action_modify_field->src.offset +
4921                      action_modify_field->width > src_width) ||
4922                     (action_modify_field->src.offset % 32))
4923                         return rte_flow_error_set(error, EINVAL,
4924                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4925                                         "source offset is too big"
4926                                         " or not aligned to 4 bytes");
4927                 if (action_modify_field->src.level &&
4928                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4929                         return rte_flow_error_set(error, ENOTSUP,
4930                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4931                                         "inner header fields modification"
4932                                         " is not supported");
4933         }
4934         if ((action_modify_field->dst.field ==
4935              action_modify_field->src.field) &&
4936             (action_modify_field->dst.level ==
4937              action_modify_field->src.level))
4938                 return rte_flow_error_set(error, EINVAL,
4939                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4940                                 "source and destination fields"
4941                                 " cannot be the same");
4942         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4943             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4944                 return rte_flow_error_set(error, EINVAL,
4945                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4946                                 "immediate value or a pointer to it"
4947                                 " cannot be used as a destination");
4948         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4949             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4950                 return rte_flow_error_set(error, ENOTSUP,
4951                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4952                                 "modification of an arbitrary"
4953                                 " place in a packet is not supported");
4954         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4955             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4956                 return rte_flow_error_set(error, ENOTSUP,
4957                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4958                                 "modification of the 802.1Q Tag"
4959                                 " Identifier is not supported");
4960         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4961             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4962                 return rte_flow_error_set(error, ENOTSUP,
4963                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4964                                 "modification of the VXLAN Network"
4965                                 " Identifier is not supported");
4966         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4967             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4968                 return rte_flow_error_set(error, ENOTSUP,
4969                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4970                                 "modification of the GENEVE Network"
4971                                 " Identifier is not supported");
4972         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4973             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4974             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4975             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4976                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4977                     !mlx5_flow_ext_mreg_supported(dev))
4978                         return rte_flow_error_set(error, ENOTSUP,
4979                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4980                                         "cannot modify mark or metadata without"
4981                                         " extended metadata register support");
4982         }
4983         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4984                 return rte_flow_error_set(error, ENOTSUP,
4985                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4986                                 "add and sub operations"
4987                                 " are not supported");
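        /* Each modify-header command covers at most 32 bits, so return
         * the number of 32-bit words needed for the width, e.g. 48 -> 2. */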
4988         return (action_modify_field->width / 32) +
4989                !!(action_modify_field->width % 32);
4990 }
4991
4992 /**
4993  * Validate jump action.
4994  *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, or NULL.
4995  * @param[in] action
4996  *   Pointer to the jump action.
4997  * @param[in] action_flags
4998  *   Holds the actions detected until now.
4999  * @param[in] attributes
5000  *   Pointer to flow attributes.
5001  * @param[in] external
5002  *   Action belongs to a flow rule created by a request external to the PMD.
5003  * @param[out] error
5004  *   Pointer to error structure.
5005  *
5006  * @return
5007  *   0 on success, a negative errno value otherwise and rte_errno is set.
5008  */
5009 static int
5010 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
5011                              const struct mlx5_flow_tunnel *tunnel,
5012                              const struct rte_flow_action *action,
5013                              uint64_t action_flags,
5014                              const struct rte_flow_attr *attributes,
5015                              bool external, struct rte_flow_error *error)
5016 {
5017         uint32_t target_group, table;
5018         int ret = 0;
5019         struct flow_grp_info grp_info = {
5020                 .external = !!external,
5021                 .transfer = !!attributes->transfer,
5022                 .fdb_def_rule = 1,
5023                 .std_tbl_fix = 0
5024         };
5025         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5026                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5027                 return rte_flow_error_set(error, EINVAL,
5028                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5029                                           "can't have 2 fate actions in"
5030                                           " the same flow");
5031         if (!action->conf)
5032                 return rte_flow_error_set(error, EINVAL,
5033                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5034                                           NULL, "action configuration not set");
5035         target_group =
5036                 ((const struct rte_flow_action_jump *)action->conf)->group;
5037         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
5038                                        &grp_info, error);
5039         if (ret)
5040                 return ret;
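        /* A jump to the current group is only allowed for tunnel offload
         * rules, whose groups are remapped internally. */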
5041         if (attributes->group == target_group &&
5042             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
5043                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
5044                 return rte_flow_error_set(error, EINVAL,
5045                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5046                                           "target group must be other than"
5047                                           " the current flow group");
5048         return 0;
5049 }
5050
5051 /**
5052  * Validate the port_id action.
5053  *
5054  * @param[in] dev
5055  *   Pointer to rte_eth_dev structure.
5056  * @param[in] action_flags
5057  *   Bit-fields that holds the actions detected until now.
5058  * @param[in] action
5059  *   Port_id RTE action structure.
5060  * @param[in] attr
5061  *   Attributes of flow that includes this action.
5062  * @param[out] error
5063  *   Pointer to error structure.
5064  *
5065  * @return
5066  *   0 on success, a negative errno value otherwise and rte_errno is set.
5067  */
5068 static int
5069 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5070                                 uint64_t action_flags,
5071                                 const struct rte_flow_action *action,
5072                                 const struct rte_flow_attr *attr,
5073                                 struct rte_flow_error *error)
5074 {
5075         const struct rte_flow_action_port_id *port_id;
5076         struct mlx5_priv *act_priv;
5077         struct mlx5_priv *dev_priv;
5078         uint16_t port;
5079
5080         if (!attr->transfer)
5081                 return rte_flow_error_set(error, ENOTSUP,
5082                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5083                                           NULL,
5084                                           "port id action is valid in transfer"
5085                                           " mode only");
5086         if (!action || !action->conf)
5087                 return rte_flow_error_set(error, ENOTSUP,
5088                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5089                                           NULL,
5090                                           "port id action parameters must be"
5091                                           " specified");
5092         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5093                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5094                 return rte_flow_error_set(error, EINVAL,
5095                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5096                                           "can have only one fate action in"
5097                                           " a flow");
5098         dev_priv = mlx5_dev_to_eswitch_info(dev);
5099         if (!dev_priv)
5100                 return rte_flow_error_set(error, rte_errno,
5101                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5102                                           NULL,
5103                                           "failed to obtain E-Switch info");
5104         port_id = action->conf;
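        /* "original" requests the flow's own port instead of the given id. */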
5105         port = port_id->original ? dev->data->port_id : port_id->id;
5106         act_priv = mlx5_port_to_eswitch_info(port, false);
5107         if (!act_priv)
5108                 return rte_flow_error_set
5109                                 (error, rte_errno,
5110                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
5111                                  "failed to obtain E-Switch port id for port");
5112         if (act_priv->domain_id != dev_priv->domain_id)
5113                 return rte_flow_error_set
5114                                 (error, EINVAL,
5115                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5116                                  "port does not belong to"
5117                                  " the E-Switch being configured");
5118         return 0;
5119 }
5120
5121 /**
5122  * Get the maximum number of modify header actions.
5123  *
5124  * @param dev
5125  *   Pointer to rte_eth_dev structure.
5126  * @param root
5127  *   Whether action is on root table.
5128  *
5129  * @return
5130  *   Max number of modify header actions device can support.
5131  */
5132 static inline unsigned int
5133 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5134                               bool root)
5135 {
5136         /*
5137          * There's no way to directly query the max capacity from FW.
5138          * The maximal value on the root table is assumed to be supported.
5139          */
5140         if (!root)
5141                 return MLX5_MAX_MODIFY_NUM;
5142         else
5143                 return MLX5_ROOT_TBL_MODIFY_NUM;
5144 }
5145
5146 /**
5147  * Validate the meter action.
5148  *
5149  * @param[in] dev
5150  *   Pointer to rte_eth_dev structure.
5151  * @param[in] action_flags
5152  *   Bit-fields that holds the actions detected until now.
5153  * @param[in] action
5154  *   Pointer to the meter action.
5155  * @param[in] attr
5156  *   Attributes of flow that includes this action.
5157  * @param[in] port_id_item
5158  *   Pointer to item indicating port id.
 * @param[out] def_policy
 *   Whether the meter uses the default policy.
5159  * @param[out] error
5160  *   Pointer to error structure.
5161  *
5162  * @return
5163  *   0 on success, a negative errno value otherwise and rte_errno is set.
5164  */
5165 static int
5166 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5167                                 uint64_t action_flags,
5168                                 const struct rte_flow_action *action,
5169                                 const struct rte_flow_attr *attr,
5170                                 const struct rte_flow_item *port_id_item,
5171                                 bool *def_policy,
5172                                 struct rte_flow_error *error)
5173 {
5174         struct mlx5_priv *priv = dev->data->dev_private;
5175         const struct rte_flow_action_meter *am = action->conf;
5176         struct mlx5_flow_meter_info *fm;
5177         struct mlx5_flow_meter_policy *mtr_policy;
5178         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5179
5180         if (!am)
5181                 return rte_flow_error_set(error, EINVAL,
5182                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5183                                           "meter action conf is NULL");
5184
5185         if (action_flags & MLX5_FLOW_ACTION_METER)
5186                 return rte_flow_error_set(error, ENOTSUP,
5187                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5188                                           "meter chaining not supported");
5189         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5190                 return rte_flow_error_set(error, ENOTSUP,
5191                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5192                                           "meter with jump not supported");
5193         if (!priv->mtr_en)
5194                 return rte_flow_error_set(error, ENOTSUP,
5195                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5196                                           NULL,
5197                                           "meter action not supported");
5198         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5199         if (!fm)
5200                 return rte_flow_error_set(error, EINVAL,
5201                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5202                                           "Meter not found");
5203         /* aso meter can always be shared by different domains */
5204         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5205             !(fm->transfer == attr->transfer ||
5206               (!fm->ingress && !attr->ingress && attr->egress) ||
5207               (!fm->egress && !attr->egress && attr->ingress)))
5208                 return rte_flow_error_set(error, EINVAL,
5209                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5210                         "Flow attributes domain is either invalid "
5211                         "or has a domain conflict with current "
5212                         "meter attributes");
5213         if (fm->def_policy) {
5214                 if (!((attr->transfer &&
5215                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5216                         (attr->egress &&
5217                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5218                         (attr->ingress &&
5219                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5220                         return rte_flow_error_set(error, EINVAL,
5221                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5222                                           "Flow attributes domain "
5223                                           "has a conflict with current "
5224                                           "meter domain attributes");
5225                 *def_policy = true;
5226         } else {
5227                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5228                                                 fm->policy_id, NULL);
5229                 if (!mtr_policy)
5230                         return rte_flow_error_set(error, EINVAL,
5231                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5232                                           "Invalid policy id for meter");
5233                 if (!((attr->transfer && mtr_policy->transfer) ||
5234                         (attr->egress && mtr_policy->egress) ||
5235                         (attr->ingress && mtr_policy->ingress)))
5236                         return rte_flow_error_set(error, EINVAL,
5237                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5238                                           "Flow attributes domain "
5239                                           "has a conflict with current "
5240                                           "meter domain attributes");
5241                 if (attr->transfer && mtr_policy->dev) {
5242                         /*
5243                          * When policy has fate action of port_id,
5244                          * the flow should have the same src port as policy.
5245                          */
5246                         struct mlx5_priv *policy_port_priv =
5247                                         mtr_policy->dev->data->dev_private;
5248                         int32_t flow_src_port = priv->representor_id;
5249
5250                         if (port_id_item) {
5251                                 const struct rte_flow_item_port_id *spec =
5252                                                         port_id_item->spec;
5253                                 struct mlx5_priv *port_priv =
5254                                         mlx5_port_to_eswitch_info(spec->id,
5255                                                                   false);
5256                                 if (!port_priv)
5257                                         return rte_flow_error_set(error,
5258                                                 rte_errno,
5259                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5260                                                 spec,
5261                                                 "Failed to get port info.");
5262                                 flow_src_port = port_priv->representor_id;
5263                         }
5264                         if (flow_src_port != policy_port_priv->representor_id)
5265                                 return rte_flow_error_set(error,
5266                                                 rte_errno,
5267                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5268                                                 NULL,
5269                                                 "Flow and meter policy "
5270                                                 "have different src port.");
5271                 }
5272                 *def_policy = false;
5273         }
5274         return 0;
5275 }
5276
5277 /**
5278  * Validate the age action.
5279  *
5280  * @param[in] action_flags
5281  *   Holds the actions detected until now.
5282  * @param[in] action
5283  *   Pointer to the age action.
5284  * @param[in] dev
5285  *   Pointer to the Ethernet device structure.
5286  * @param[out] error
5287  *   Pointer to error structure.
5288  *
5289  * @return
5290  *   0 on success, a negative errno value otherwise and rte_errno is set.
5291  */
5292 static int
5293 flow_dv_validate_action_age(uint64_t action_flags,
5294                             const struct rte_flow_action *action,
5295                             struct rte_eth_dev *dev,
5296                             struct rte_flow_error *error)
5297 {
5298         struct mlx5_priv *priv = dev->data->dev_private;
5299         const struct rte_flow_action_age *age = action->conf;
5300
5301         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5302             !priv->sh->aso_age_mng))
5303                 return rte_flow_error_set(error, ENOTSUP,
5304                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5305                                           NULL,
5306                                           "age action not supported");
5307         if (!(action->conf))
5308                 return rte_flow_error_set(error, EINVAL,
5309                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5310                                           "configuration cannot be NULL");
5311         if (!(age->timeout))
5312                 return rte_flow_error_set(error, EINVAL,
5313                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5314                                           "invalid timeout value 0");
5315         if (action_flags & MLX5_FLOW_ACTION_AGE)
5316                 return rte_flow_error_set(error, EINVAL,
5317                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5318                                           "duplicate age actions set");
5319         return 0;
5320 }
5321
5322 /**
5323  * Validate the modify-header IPv4 DSCP actions.
5324  *
5325  * @param[in] action_flags
5326  *   Holds the actions detected until now.
5327  * @param[in] action
5328  *   Pointer to the modify action.
5329  * @param[in] item_flags
5330  *   Holds the items detected.
5331  * @param[out] error
5332  *   Pointer to error structure.
5333  *
5334  * @return
5335  *   0 on success, a negative errno value otherwise and rte_errno is set.
5336  */
5337 static int
5338 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5339                                          const struct rte_flow_action *action,
5340                                          const uint64_t item_flags,
5341                                          struct rte_flow_error *error)
5342 {
5343         int ret = 0;
5344
5345         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5346         if (!ret) {
5347                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5348                         return rte_flow_error_set(error, EINVAL,
5349                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5350                                                   NULL,
5351                                                   "no ipv4 item in pattern");
5352         }
5353         return ret;
5354 }
5355
5356 /**
5357  * Validate the modify-header IPv6 DSCP actions.
5358  *
5359  * @param[in] action_flags
5360  *   Holds the actions detected until now.
5361  * @param[in] action
5362  *   Pointer to the modify action.
5363  * @param[in] item_flags
5364  *   Holds the items detected.
5365  * @param[out] error
5366  *   Pointer to error structure.
5367  *
5368  * @return
5369  *   0 on success, a negative errno value otherwise and rte_errno is set.
5370  */
5371 static int
5372 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5373                                          const struct rte_flow_action *action,
5374                                          const uint64_t item_flags,
5375                                          struct rte_flow_error *error)
5376 {
5377         int ret = 0;
5378
5379         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5380         if (!ret) {
5381                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5382                         return rte_flow_error_set(error, EINVAL,
5383                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5384                                                   NULL,
5385                                                   "no ipv6 item in pattern");
5386         }
5387         return ret;
5388 }
5389
5390 int
5391 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5392                         struct mlx5_list_entry *entry, void *cb_ctx)
5393 {
5394         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5395         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5396         struct mlx5_flow_dv_modify_hdr_resource *resource =
5397                                   container_of(entry, typeof(*resource), entry);
5398         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5399
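        /* Compare the fixed key tail starting at ft_type together with
         * the variable-length array of modification commands. */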
5400         key_len += ref->actions_num * sizeof(ref->actions[0]);
5401         return ref->actions_num != resource->actions_num ||
5402                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5403 }
5404
5405 static struct mlx5_indexed_pool *
5406 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5407 {
5408         struct mlx5_indexed_pool *ipool = __atomic_load_n
5409                                      (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5410
5411         if (!ipool) {
5412                 struct mlx5_indexed_pool *expected = NULL;
5413                 struct mlx5_indexed_pool_config cfg =
5414                     (struct mlx5_indexed_pool_config) {
5415                        .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5416                                                                    (index + 1) *
5417                                            sizeof(struct mlx5_modification_cmd),
5418                        .trunk_size = 64,
5419                        .grow_trunk = 3,
5420                        .grow_shift = 2,
5421                        .need_lock = 1,
5422                        .release_mem_en = !!sh->reclaim_mode,
5423                        .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
5424                        .malloc = mlx5_malloc,
5425                        .free = mlx5_free,
5426                        .type = "mlx5_modify_action_resource",
5427                 };
5428
5429                 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5430                 ipool = mlx5_ipool_create(&cfg);
5431                 if (!ipool)
5432                         return NULL;
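                /* Publish the new pool; if another thread has already
                 * installed one, destroy ours and use the winner's. */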
5433                 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5434                                                  &expected, ipool, false,
5435                                                  __ATOMIC_SEQ_CST,
5436                                                  __ATOMIC_SEQ_CST)) {
5437                         mlx5_ipool_destroy(ipool);
5438                         ipool = __atomic_load_n(&sh->mdh_ipools[index],
5439                                                 __ATOMIC_SEQ_CST);
5440                 }
5441         }
5442         return ipool;
5443 }
5444
5445 struct mlx5_list_entry *
5446 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5447 {
5448         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5449         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5450         struct mlx5dv_dr_domain *ns;
5451         struct mlx5_flow_dv_modify_hdr_resource *entry;
5452         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5453         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5454                                                           ref->actions_num - 1);
5455         int ret;
5456         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5457         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5458         uint32_t idx;
5459
5460         if (unlikely(!ipool)) {
5461                 rte_flow_error_set(ctx->error, ENOMEM,
5462                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5463                                    NULL, "cannot allocate modify ipool");
5464                 return NULL;
5465         }
5466         entry = mlx5_ipool_zmalloc(ipool, &idx);
5467         if (!entry) {
5468                 rte_flow_error_set(ctx->error, ENOMEM,
5469                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5470                                    "cannot allocate resource memory");
5471                 return NULL;
5472         }
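        /* Copy the key part (from ft_type onward) and the trailing
         * modification commands in a single copy. */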
5473         rte_memcpy(&entry->ft_type,
5474                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5475                    key_len + data_len);
5476         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5477                 ns = sh->fdb_domain;
5478         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5479                 ns = sh->tx_domain;
5480         else
5481                 ns = sh->rx_domain;
5482         ret = mlx5_flow_os_create_flow_action_modify_header
5483                                         (sh->ctx, ns, entry,
5484                                          data_len, &entry->action);
5485         if (ret) {
5486                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5487                 rte_flow_error_set(ctx->error, ENOMEM,
5488                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5489                                    NULL, "cannot create modification action");
5490                 return NULL;
5491         }
5492         entry->idx = idx;
5493         return &entry->entry;
5494 }
5495
5496 struct mlx5_list_entry *
5497 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5498                         void *cb_ctx)
5499 {
5500         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5501         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5502         struct mlx5_flow_dv_modify_hdr_resource *entry;
5503         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5504         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5505         uint32_t idx;
5506
5507         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5508                                   &idx);
5509         if (!entry) {
5510                 rte_flow_error_set(ctx->error, ENOMEM,
5511                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5512                                    "cannot allocate resource memory");
5513                 return NULL;
5514         }
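        /* Duplicate the whole resource, trailing commands included. */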
5515         memcpy(entry, oentry, sizeof(*entry) + data_len);
5516         entry->idx = idx;
5517         return &entry->entry;
5518 }
5519
5520 void
5521 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5522 {
5523         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5524         struct mlx5_flow_dv_modify_hdr_resource *res =
5525                 container_of(entry, typeof(*res), entry);
5526
5527         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5528 }
5529
5530 /**
5531  * Validate the sample action.
5532  *
5533  * @param[in, out] action_flags
5534  *   Holds the actions detected until now.
5535  * @param[in] action
5536  *   Pointer to the sample action.
5537  * @param[in] dev
5538  *   Pointer to the Ethernet device structure.
5539  * @param[in] attr
5540  *   Attributes of flow that includes this action.
5541  * @param[in] item_flags
5542  *   Holds the items detected.
5543  * @param[in] rss
5544  *   Pointer to the RSS action.
5545  * @param[out] sample_rss
5546  *   Pointer to the RSS action in sample action list.
5547  * @param[out] count
5548  *   Pointer to the COUNT action in sample action list.
5549  * @param[out] fdb_mirror_limit
5550  *   Pointer to the FDB mirror limitation flag.
5551  * @param[out] error
5552  *   Pointer to error structure.
5553  *
5554  * @return
5555  *   0 on success, a negative errno value otherwise and rte_errno is set.
5556  */
5557 static int
5558 flow_dv_validate_action_sample(uint64_t *action_flags,
5559                                const struct rte_flow_action *action,
5560                                struct rte_eth_dev *dev,
5561                                const struct rte_flow_attr *attr,
5562                                uint64_t item_flags,
5563                                const struct rte_flow_action_rss *rss,
5564                                const struct rte_flow_action_rss **sample_rss,
5565                                const struct rte_flow_action_count **count,
5566                                int *fdb_mirror_limit,
5567                                struct rte_flow_error *error)
5568 {
5569         struct mlx5_priv *priv = dev->data->dev_private;
5570         struct mlx5_dev_config *dev_conf = &priv->config;
5571         const struct rte_flow_action_sample *sample = action->conf;
5572         const struct rte_flow_action *act;
5573         uint64_t sub_action_flags = 0;
5574         uint16_t queue_index = 0xFFFF;
5575         int actions_n = 0;
5576         int ret;
5577
5578         if (!sample)
5579                 return rte_flow_error_set(error, EINVAL,
5580                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5581                                           "configuration cannot be NULL");
5582         if (sample->ratio == 0)
5583                 return rte_flow_error_set(error, EINVAL,
5584                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5585                                           "ratio value starts from 1");
5586         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5587                 return rte_flow_error_set(error, ENOTSUP,
5588                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5589                                           NULL,
5590                                           "sample action not supported");
5591         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5592                 return rte_flow_error_set(error, EINVAL,
5593                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5594                                           "Multiple sample actions not "
5595                                           "supported");
5596         if (*action_flags & MLX5_FLOW_ACTION_METER)
5597                 return rte_flow_error_set(error, EINVAL,
5598                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5599                                           "wrong action order, meter should "
5600                                           "be after sample action");
5601         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5602                 return rte_flow_error_set(error, EINVAL,
5603                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5604                                           "wrong action order, jump should "
5605                                           "be after sample action");
5606         act = sample->actions;
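        /* Validate each sub-action; sub_action_flags accumulates them
         * separately from the outer flow's action_flags. */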
5607         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5608                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5609                         return rte_flow_error_set(error, ENOTSUP,
5610                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5611                                                   act, "too many actions");
5612                 switch (act->type) {
5613                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5614                         ret = mlx5_flow_validate_action_queue(act,
5615                                                               sub_action_flags,
5616                                                               dev,
5617                                                               attr, error);
5618                         if (ret < 0)
5619                                 return ret;
5620                         queue_index = ((const struct rte_flow_action_queue *)
5621                                                         (act->conf))->index;
5622                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5623                         ++actions_n;
5624                         break;
5625                 case RTE_FLOW_ACTION_TYPE_RSS:
5626                         *sample_rss = act->conf;
5627                         ret = mlx5_flow_validate_action_rss(act,
5628                                                             sub_action_flags,
5629                                                             dev, attr,
5630                                                             item_flags,
5631                                                             error);
5632                         if (ret < 0)
5633                                 return ret;
5634                         if (rss && *sample_rss &&
5635                             ((*sample_rss)->level != rss->level ||
5636                             (*sample_rss)->types != rss->types))
5637                                 return rte_flow_error_set(error, ENOTSUP,
5638                                         RTE_FLOW_ERROR_TYPE_ACTION,
5639                                         NULL,
5640                                         "Can't use different RSS types "
5641                                         "or levels in the same flow");
5642                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5643                                 queue_index = (*sample_rss)->queue[0];
5644                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5645                         ++actions_n;
5646                         break;
5647                 case RTE_FLOW_ACTION_TYPE_MARK:
5648                         ret = flow_dv_validate_action_mark(dev, act,
5649                                                            sub_action_flags,
5650                                                            attr, error);
5651                         if (ret < 0)
5652                                 return ret;
5653                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5654                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5655                                                 MLX5_FLOW_ACTION_MARK_EXT;
5656                         else
5657                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5658                         ++actions_n;
5659                         break;
5660                 case RTE_FLOW_ACTION_TYPE_COUNT:
5661                         ret = flow_dv_validate_action_count
5662                                 (dev, is_shared_action_count(act),
5663                                  *action_flags | sub_action_flags,
5664                                  error);
5665                         if (ret < 0)
5666                                 return ret;
5667                         *count = act->conf;
5668                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5669                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5670                         ++actions_n;
5671                         break;
5672                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5673                         ret = flow_dv_validate_action_port_id(dev,
5674                                                               sub_action_flags,
5675                                                               act,
5676                                                               attr,
5677                                                               error);
5678                         if (ret)
5679                                 return ret;
5680                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5681                         ++actions_n;
5682                         break;
5683                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5684                         ret = flow_dv_validate_action_raw_encap_decap
5685                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5686                                  &actions_n, action, item_flags, error);
5687                         if (ret < 0)
5688                                 return ret;
5689                         ++actions_n;
5690                         break;
5691                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5692                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5693                         ret = flow_dv_validate_action_l2_encap(dev,
5694                                                                sub_action_flags,
5695                                                                act, attr,
5696                                                                error);
5697                         if (ret < 0)
5698                                 return ret;
5699                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5700                         ++actions_n;
5701                         break;
5702                 default:
5703                         return rte_flow_error_set(error, ENOTSUP,
5704                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5705                                                   NULL,
5706                                                   "optional action not "
5707                                                   "supported");
5708                 }
5709         }
5710         if (attr->ingress && !attr->transfer) {
5711                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5712                                           MLX5_FLOW_ACTION_RSS)))
5713                         return rte_flow_error_set(error, EINVAL,
5714                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5715                                                   NULL,
5716                                                   "Ingress must have a dest "
5717                                                   "QUEUE for Sample");
5718         } else if (attr->egress && !attr->transfer) {
5719                 return rte_flow_error_set(error, ENOTSUP,
5720                                           RTE_FLOW_ERROR_TYPE_ACTION,
5721                                           NULL,
5722                                           "Sample only supports Ingress "
5723                                           "or E-Switch");
5724         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5725                 MLX5_ASSERT(attr->transfer);
5726                 if (sample->ratio > 1)
5727                         return rte_flow_error_set(error, ENOTSUP,
5728                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5729                                                   NULL,
5730                                                   "E-Switch doesn't support "
5731                                                   "any optional action "
5732                                                   "for sampling");
5733                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5734                         return rte_flow_error_set(error, ENOTSUP,
5735                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5736                                                   NULL,
5737                                                   "unsupported action QUEUE");
5738                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5739                         return rte_flow_error_set(error, ENOTSUP,
5740                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5741                                                   NULL,
5742                                                   "unsupported action RSS");
5743                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5744                         return rte_flow_error_set(error, EINVAL,
5745                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5746                                                   NULL,
5747                                                   "E-Switch must have a dest "
5748                                                   "port for mirroring");
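                /* reg_c values are not preserved across FDB mirroring on
                 * this device and the port is a representor; flag the
                 * limitation so the caller can restrict such rules. */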
5749                 if (!priv->config.hca_attr.reg_c_preserve &&
5750                      priv->representor_id != UINT16_MAX)
5751                         *fdb_mirror_limit = 1;
5752         }
5753         /* Continue validation for Xcap actions. */
5754         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5755             (queue_index == 0xFFFF ||
5756              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5757                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5758                      MLX5_FLOW_XCAP_ACTIONS)
5759                         return rte_flow_error_set(error, ENOTSUP,
5760                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5761                                                   NULL, "encap and decap "
5762                                                   "combination isn't "
5763                                                   "supported");
5764                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5765                                                         MLX5_FLOW_ACTION_ENCAP))
5766                         return rte_flow_error_set(error, ENOTSUP,
5767                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5768                                                   NULL, "encap is not supported"
5769                                                   " for ingress traffic");
5770         }
5771         return 0;
5772 }
5773
5774 /**
5775  * Find existing modify-header resource or create and register a new one.
5776  *
5777  * @param[in, out] dev
5778  *   Pointer to rte_eth_dev structure.
5779  * @param[in, out] resource
5780  *   Pointer to modify-header resource.
5781  * @param[in, out] dev_flow
5782  *   Pointer to the dev_flow.
5783  * @param[out] error
5784  *   Pointer to error structure.
5785  *
5786  * @return
5787  *   0 on success, otherwise a negative errno value and rte_errno is set.
5788  */
5789 static int
5790 flow_dv_modify_hdr_resource_register
5791                         (struct rte_eth_dev *dev,
5792                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5793                          struct mlx5_flow *dev_flow,
5794                          struct rte_flow_error *error)
5795 {
5796         struct mlx5_priv *priv = dev->data->dev_private;
5797         struct mlx5_dev_ctx_shared *sh = priv->sh;
5798         uint32_t key_len = sizeof(*resource) -
5799                            offsetof(typeof(*resource), ft_type) +
5800                            resource->actions_num * sizeof(resource->actions[0]);
5801         struct mlx5_list_entry *entry;
5802         struct mlx5_flow_cb_ctx ctx = {
5803                 .error = error,
5804                 .data = resource,
5805         };
5806         struct mlx5_hlist *modify_cmds;
5807         uint64_t key64;
5808
5809         modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5810                                 "hdr_modify",
5811                                 MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5812                                 true, false, sh,
5813                                 flow_dv_modify_create_cb,
5814                                 flow_dv_modify_match_cb,
5815                                 flow_dv_modify_remove_cb,
5816                                 flow_dv_modify_clone_cb,
5817                                 flow_dv_modify_clone_free_cb);
5818         if (unlikely(!modify_cmds))
5819                 return -rte_errno;
5820         resource->root = !dev_flow->dv.group;
5821         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5822                                                                 resource->root))
5823                 return rte_flow_error_set(error, EOVERFLOW,
5824                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5825                                           "too many modify header items");
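        /* A raw checksum over the whole key serves as the 64-bit hash key. */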
5826         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5827         entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5828         if (!entry)
5829                 return -rte_errno;
5830         resource = container_of(entry, typeof(*resource), entry);
5831         dev_flow->handle->dvh.modify_hdr = resource;
5832         return 0;
5833 }
5834
5835 /**
5836  * Get DV flow counter by index.
5837  *
5838  * @param[in] dev
5839  *   Pointer to the Ethernet device structure.
5840  * @param[in] idx
5841  *   mlx5 flow counter index in the container.
5842  * @param[out] ppool
5843  *   mlx5 flow counter pool in the container.
5844  *
5845  * @return
5846  *   Pointer to the counter, NULL otherwise.
5847  */
5848 static struct mlx5_flow_counter *
5849 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5850                            uint32_t idx,
5851                            struct mlx5_flow_counter_pool **ppool)
5852 {
5853         struct mlx5_priv *priv = dev->data->dev_private;
5854         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5855         struct mlx5_flow_counter_pool *pool;
5856
5857         /* Decrease to original index and clear shared bit. */
5858         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5859         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5860         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5861         MLX5_ASSERT(pool);
5862         if (ppool)
5863                 *ppool = pool;
5864         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5865 }
5866
5867 /**
5868  * Check the devx counter belongs to the pool.
5869  *
5870  * @param[in] pool
5871  *   Pointer to the counter pool.
5872  * @param[in] id
5873  *   The counter devx ID.
5874  *
5875  * @return
5876  *   True if counter belongs to the pool, false otherwise.
5877  */
5878 static bool
5879 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5880 {
5881         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5882                    MLX5_COUNTERS_PER_POOL;
5883
5884         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5885                 return true;
5886         return false;
5887 }
5888
5889 /**
5890  * Get a pool by devx counter ID.
5891  *
5892  * @param[in] cmng
5893  *   Pointer to the counter management.
5894  * @param[in] id
5895  *   The counter devx ID.
5896  *
5897  * @return
5898  *   The counter pool pointer if it exists, NULL otherwise.
5899  */
5900 static struct mlx5_flow_counter_pool *
5901 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5902 {
5903         uint32_t i;
5904         struct mlx5_flow_counter_pool *pool = NULL;
5905
5906         rte_spinlock_lock(&cmng->pool_update_sl);
5907         /* Check last used pool. */
5908         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5909             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5910                 pool = cmng->pools[cmng->last_pool_idx];
5911                 goto out;
5912         }
5913         /* ID out of range means no suitable pool in the container. */
5914         if (id > cmng->max_id || id < cmng->min_id)
5915                 goto out;
5916         /*
5917          * Find the pool from the end of the container, since counter IDs
5918          * mostly increase sequentially, so the last pool is likely the
5919          * needed one.
5920          */
5921         i = cmng->n_valid;
5922         while (i--) {
5923                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5924
5925                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5926                         pool = pool_tmp;
5927                         break;
5928                 }
5929         }
5930 out:
5931         rte_spinlock_unlock(&cmng->pool_update_sl);
5932         return pool;
5933 }
5934
5935 /**
5936  * Resize a counter container.
5937  *
5938  * @param[in] dev
5939  *   Pointer to the Ethernet device structure.
5940  *
5941  * @return
5942  *   0 on success, otherwise negative errno value and rte_errno is set.
5943  */
5944 static int
5945 flow_dv_container_resize(struct rte_eth_dev *dev)
5946 {
5947         struct mlx5_priv *priv = dev->data->dev_private;
5948         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5949         void *old_pools = cmng->pools;
5950         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5951         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5952         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5953
5954         if (!pools) {
5955                 rte_errno = ENOMEM;
5956                 return -ENOMEM;
5957         }
5958         if (old_pools)
5959                 memcpy(pools, old_pools, cmng->n *
5960                                        sizeof(struct mlx5_flow_counter_pool *));
5961         cmng->n = resize;
5962         cmng->pools = pools;
5963         if (old_pools)
5964                 mlx5_free(old_pools);
5965         return 0;
5966 }
5967
5968 /**
5969  * Query a devx flow counter.
5970  *
5971  * @param[in] dev
5972  *   Pointer to the Ethernet device structure.
5973  * @param[in] counter
5974  *   Index to the flow counter.
5975  * @param[out] pkts
5976  *   The statistics value of packets.
5977  * @param[out] bytes
5978  *   The statistics value of bytes.
5979  *
5980  * @return
5981  *   0 on success, otherwise a negative errno value and rte_errno is set.
5982  */
5983 static inline int
5984 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5985                      uint64_t *bytes)
5986 {
5987         struct mlx5_priv *priv = dev->data->dev_private;
5988         struct mlx5_flow_counter_pool *pool = NULL;
5989         struct mlx5_flow_counter *cnt;
5990         int offset;
5991
5992         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5993         MLX5_ASSERT(pool);
5994         if (priv->sh->cmng.counter_fallback)
5995                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5996                                         0, pkts, bytes, 0, NULL, NULL, 0);
5997         rte_spinlock_lock(&pool->sl);
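        /* pool->raw is NULL until the asynchronous counter query has
         * delivered data; report zeroed statistics meanwhile. */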
5998         if (!pool->raw) {
5999                 *pkts = 0;
6000                 *bytes = 0;
6001         } else {
6002                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
6003                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
6004                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
6005         }
6006         rte_spinlock_unlock(&pool->sl);
6007         return 0;
6008 }
6009
6010 /**
6011  * Create and initialize a new counter pool.
6012  *
6013  * @param[in] dev
6014  *   Pointer to the Ethernet device structure.
6015  * @param[out] dcs
6016  *   The devX counter handle.
6017  * @param[in] age
6018  *   Whether the pool is for counters allocated for aging.
6021  *
6022  * @return
6023  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
6024  */
6025 static struct mlx5_flow_counter_pool *
6026 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
6027                     uint32_t age)
6028 {
6029         struct mlx5_priv *priv = dev->data->dev_private;
6030         struct mlx5_flow_counter_pool *pool;
6031         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6032         bool fallback = priv->sh->cmng.counter_fallback;
6033         uint32_t size = sizeof(*pool);
6034
6035         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
6036         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
6037         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
6038         if (!pool) {
6039                 rte_errno = ENOMEM;
6040                 return NULL;
6041         }
6042         pool->raw = NULL;
6043         pool->is_aged = !!age;
6044         pool->query_gen = 0;
6045         pool->min_dcs = dcs;
6046         rte_spinlock_init(&pool->sl);
6047         rte_spinlock_init(&pool->csl);
6048         TAILQ_INIT(&pool->counters[0]);
6049         TAILQ_INIT(&pool->counters[1]);
6050         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6051         rte_spinlock_lock(&cmng->pool_update_sl);
6052         pool->index = cmng->n_valid;
6053         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6054                 mlx5_free(pool);
6055                 rte_spinlock_unlock(&cmng->pool_update_sl);
6056                 return NULL;
6057         }
6058         cmng->pools[pool->index] = pool;
6059         cmng->n_valid++;
6060         if (unlikely(fallback)) {
6061                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6062
6063                 if (base < cmng->min_id)
6064                         cmng->min_id = base;
6065                 if (base > cmng->max_id)
6066                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6067                 cmng->last_pool_idx = pool->index;
6068         }
6069         rte_spinlock_unlock(&cmng->pool_update_sl);
6070         return pool;
6071 }
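
/*
 * Illustrative note, not part of the driver: a counter is addressed by a
 * compound, 1-based index combining the owning pool index with the offset
 * inside the pool; index 0 means "no counter". The decode below mirrors the
 * (counter - 1) % MLX5_COUNTERS_PER_POOL arithmetic used later in this file;
 * example_cnt_offset is a hypothetical name.
 */
static uint32_t __rte_unused
example_cnt_offset(uint32_t cnt_idx)
{
        /* Offset of the counter inside its pool. */
        return (cnt_idx - 1) % MLX5_COUNTERS_PER_POOL;
}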
6072
6073 /**
6074  * Prepare a new counter and/or a new counter pool.
6075  *
6076  * @param[in] dev
6077  *   Pointer to the Ethernet device structure.
6078  * @param[out] cnt_free
6079  *   Where to put the pointer of a new counter.
6080  * @param[in] age
6081  *   Whether the pool is for counters that were allocated for aging.
6082  *
6083  * @return
6084  *   The counter pool pointer and @p cnt_free is set on success,
6085  *   NULL otherwise and rte_errno is set.
6086  */
6087 static struct mlx5_flow_counter_pool *
6088 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6089                              struct mlx5_flow_counter **cnt_free,
6090                              uint32_t age)
6091 {
6092         struct mlx5_priv *priv = dev->data->dev_private;
6093         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6094         struct mlx5_flow_counter_pool *pool;
6095         struct mlx5_counters tmp_tq;
6096         struct mlx5_devx_obj *dcs = NULL;
6097         struct mlx5_flow_counter *cnt;
6098         enum mlx5_counter_type cnt_type =
6099                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6100         bool fallback = priv->sh->cmng.counter_fallback;
6101         uint32_t i;
6102
6103         if (fallback) {
6104                 /* bulk_bitmap must be 0 for single counter allocation. */
6105                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
6106                 if (!dcs)
6107                         return NULL;
6108                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6109                 if (!pool) {
6110                         pool = flow_dv_pool_create(dev, dcs, age);
6111                         if (!pool) {
6112                                 mlx5_devx_cmd_destroy(dcs);
6113                                 return NULL;
6114                         }
6115                 }
6116                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6117                 cnt = MLX5_POOL_GET_CNT(pool, i);
6118                 cnt->pool = pool;
6119                 cnt->dcs_when_free = dcs;
6120                 *cnt_free = cnt;
6121                 return pool;
6122         }
6123         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
6124         if (!dcs) {
6125                 rte_errno = ENODATA;
6126                 return NULL;
6127         }
6128         pool = flow_dv_pool_create(dev, dcs, age);
6129         if (!pool) {
6130                 mlx5_devx_cmd_destroy(dcs);
6131                 return NULL;
6132         }
6133         TAILQ_INIT(&tmp_tq);
6134         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6135                 cnt = MLX5_POOL_GET_CNT(pool, i);
6136                 cnt->pool = pool;
6137                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6138         }
6139         rte_spinlock_lock(&cmng->csl[cnt_type]);
6140         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6141         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6142         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6143         (*cnt_free)->pool = pool;
6144         return pool;
6145 }
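
/*
 * Illustrative note: above, counters 1..MLX5_COUNTERS_PER_POOL-1 are linked
 * on a private list first and spliced into the shared free list with a
 * single TAILQ_CONCAT under one lock acquisition, instead of locking once
 * per counter; counter 0 is handed straight back to the caller. A minimal
 * sketch of the same splice idiom; example_splice_counters is a hypothetical
 * name.
 */
static void __rte_unused
example_splice_counters(struct mlx5_counters *dst, rte_spinlock_t *lock,
                        struct mlx5_counters *src)
{
        /* One lock round-trip publishes the whole batch. */
        rte_spinlock_lock(lock);
        TAILQ_CONCAT(dst, src, next);
        rte_spinlock_unlock(lock);
}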
6146
6147 /**
6148  * Allocate a flow counter.
6149  *
6150  * @param[in] dev
6151  *   Pointer to the Ethernet device structure.
6152  * @param[in] age
6153  *   Whether the counter was allocated for aging.
6154  *
6155  * @return
6156  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6157  */
6158 static uint32_t
6159 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6160 {
6161         struct mlx5_priv *priv = dev->data->dev_private;
6162         struct mlx5_flow_counter_pool *pool = NULL;
6163         struct mlx5_flow_counter *cnt_free = NULL;
6164         bool fallback = priv->sh->cmng.counter_fallback;
6165         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6166         enum mlx5_counter_type cnt_type =
6167                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6168         uint32_t cnt_idx;
6169
6170         if (!priv->config.devx) {
6171                 rte_errno = ENOTSUP;
6172                 return 0;
6173         }
6174         /* Get free counters from container. */
6175         rte_spinlock_lock(&cmng->csl[cnt_type]);
6176         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6177         if (cnt_free)
6178                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6179         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6180         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6181                 goto err;
6182         pool = cnt_free->pool;
6183         if (fallback)
6184                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6185         /* Create a DV counter action only on first-time usage. */
6186         if (!cnt_free->action) {
6187                 uint16_t offset;
6188                 struct mlx5_devx_obj *dcs;
6189                 int ret;
6190
6191                 if (!fallback) {
6192                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6193                         dcs = pool->min_dcs;
6194                 } else {
6195                         offset = 0;
6196                         dcs = cnt_free->dcs_when_free;
6197                 }
6198                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6199                                                             &cnt_free->action);
6200                 if (ret) {
6201                         rte_errno = errno;
6202                         goto err;
6203                 }
6204         }
6205         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6206                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6207         /* Update the counter reset values. */
6208         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6209                                  &cnt_free->bytes))
6210                 goto err;
6211         if (!fallback && !priv->sh->cmng.query_thread_on)
6212                 /* Start the asynchronous batch query by the host thread. */
6213                 mlx5_set_query_alarm(priv->sh);
6214         /*
6215          * When the count action isn't shared (by ID), the shared_info
6216          * field is used for the indirect action API's refcnt.
6217          * When the counter action is shared neither by ID nor by the
6218          * indirect action API, the refcnt must be 1.
6219          */
6220         cnt_free->shared_info.refcnt = 1;
6221         return cnt_idx;
6222 err:
6223         if (cnt_free) {
6224                 cnt_free->pool = pool;
6225                 if (fallback)
6226                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6227                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6228                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6229                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6230         }
6231         return 0;
6232 }
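
/*
 * Illustrative sketch, not part of the driver: the expected calling pattern
 * for the allocator above -- allocate, attach the returned index to flow
 * rules, and later release it through flow_dv_counter_free() defined further
 * below. example_counter_alloc is a hypothetical name.
 */
static uint32_t __rte_unused
example_counter_alloc(struct rte_eth_dev *dev)
{
        /* age == 0: a plain counter, not attached to the aging mechanism. */
        uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);

        if (!cnt_idx)
                return 0; /* rte_errno was set by the allocator. */
        /* ... use cnt_idx in flow rules, query it, then free it ... */
        return cnt_idx;
}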
6233
6234 /**
6235  * Allocate a shared flow counter.
6236  *
6237  * @param[in] ctx
6238  *   Pointer to the shared counter configuration.
6239  * @param[in] data
6240  *   Pointer to save the allocated counter index.
6241  *
6242  * @return
6243  *   0 on success; the allocated counter index is stored in @p data.
6244  */
6246 static int32_t
6247 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
6248 {
6249         struct mlx5_shared_counter_conf *conf = ctx;
6250         struct rte_eth_dev *dev = conf->dev;
6251         struct mlx5_flow_counter *cnt;
6252
6253         data->dword = flow_dv_counter_alloc(dev, 0);
6254         data->dword |= MLX5_CNT_SHARED_OFFSET;
6255         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6256         cnt->shared_info.id = conf->id;
6257         return 0;
6258 }
6259
6260 /**
6261  * Get a shared flow counter.
6262  *
6263  * @param[in] dev
6264  *   Pointer to the Ethernet device structure.
6265  * @param[in] id
6266  *   Counter identifier.
6267  *
6268  * @return
6269  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6270  */
6271 static uint32_t
6272 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6273 {
6274         struct mlx5_priv *priv = dev->data->dev_private;
6275         struct mlx5_shared_counter_conf conf = {
6276                 .dev = dev,
6277                 .id = id,
6278         };
6279         union mlx5_l3t_data data = {
6280                 .dword = 0,
6281         };
6282
6283         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6284                                flow_dv_counter_alloc_shared_cb, &conf);
6285         return data.dword;
6286 }
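
/*
 * Illustrative note: the lookup above leans on the level-3 table helper --
 * mlx5_l3t_prepare_entry() returns an existing entry for the given ID or
 * invokes the supplied allocation callback to create one, so concurrent
 * callers requesting the same shared counter ID converge on one allocation.
 */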
6287
6288 /**
6289  * Get age param from counter index.
6290  *
6291  * @param[in] dev
6292  *   Pointer to the Ethernet device structure.
6293  * @param[in] counter
6294  *   Index to the counter handler.
6295  *
6296  * @return
6297  *   The aging parameter specified for the counter index.
6298  */
6299 static struct mlx5_age_param *
6300 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6301                                 uint32_t counter)
6302 {
6303         struct mlx5_flow_counter *cnt;
6304         struct mlx5_flow_counter_pool *pool = NULL;
6305
6306         flow_dv_counter_get_by_idx(dev, counter, &pool);
6307         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6308         cnt = MLX5_POOL_GET_CNT(pool, counter);
6309         return MLX5_CNT_TO_AGE(cnt);
6310 }
6311
6312 /**
6313  * Remove a flow counter from aged counter list.
6314  *
6315  * @param[in] dev
6316  *   Pointer to the Ethernet device structure.
6317  * @param[in] counter
6318  *   Index to the counter handler.
6319  * @param[in] cnt
6320  *   Pointer to the counter handler.
6321  */
6322 static void
6323 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6324                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6325 {
6326         struct mlx5_age_info *age_info;
6327         struct mlx5_age_param *age_param;
6328         struct mlx5_priv *priv = dev->data->dev_private;
6329         uint16_t expected = AGE_CANDIDATE;
6330
6331         age_info = GET_PORT_AGE_INFO(priv);
6332         age_param = flow_dv_counter_idx_get_age(dev, counter);
6333         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6334                                          AGE_FREE, false, __ATOMIC_RELAXED,
6335                                          __ATOMIC_RELAXED)) {
6336                 /*
6337                  * We need the lock even if the age timed out, since the
6338                  * counter may still be processed by the aging task.
6339                  */
6340                 rte_spinlock_lock(&age_info->aged_sl);
6341                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6342                 rte_spinlock_unlock(&age_info->aged_sl);
6343                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6344         }
6345 }
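
/*
 * Illustrative note: the compare-and-exchange above implements a
 * claim-or-cleanup hand-off -- if the counter is still AGE_CANDIDATE it is
 * reset to AGE_FREE directly; if the CAS fails, the counter already aged out
 * and sits on the aged list, so it is unlinked under the list lock before
 * its state is cleared. A compact restatement of the claim step;
 * example_try_claim is a hypothetical name.
 */
static bool __rte_unused
example_try_claim(uint16_t *state)
{
        uint16_t expected = AGE_CANDIDATE;

        /* True: claimed directly. False: the aging path still owns it. */
        return __atomic_compare_exchange_n(state, &expected, AGE_FREE, false,
                                           __ATOMIC_RELAXED,
                                           __ATOMIC_RELAXED);
}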
6346
6347 /**
6348  * Release a flow counter.
6349  *
6350  * @param[in] dev
6351  *   Pointer to the Ethernet device structure.
6352  * @param[in] counter
6353  *   Index to the counter handler.
6354  */
6355 static void
6356 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6357 {
6358         struct mlx5_priv *priv = dev->data->dev_private;
6359         struct mlx5_flow_counter_pool *pool = NULL;
6360         struct mlx5_flow_counter *cnt;
6361         enum mlx5_counter_type cnt_type;
6362
6363         if (!counter)
6364                 return;
6365         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6366         MLX5_ASSERT(pool);
6367         if (pool->is_aged) {
6368                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6369         } else {
6370                 /*
6371                  * If the counter action is shared by ID, the l3t_clear_entry
6372                  * function reduces its reference counter. If after the
6373                  * reduction the action is still referenced, the function
6374                  * returns here and does not release it.
6375                  */
6376                 if (IS_LEGACY_SHARED_CNT(counter) &&
6377                     mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
6378                                          cnt->shared_info.id))
6379                         return;
6380                 /*
6381                  * If the counter action is shared by indirect action API,
6382                  * the atomic function reduces its reference counter.
6383                  * If after the reduction the action is still referenced, the
6384                  * function returns here and does not release it.
6385                  * When the counter action is shared neither by ID nor by the
6386                  * indirect action API, the refcnt is 1 before the reduction,
6387                  * so the condition fails and the function does not return here.
6388                  */
6389                 if (!IS_LEGACY_SHARED_CNT(counter) &&
6390                     __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6391                                        __ATOMIC_RELAXED))
6392                         return;
6393         }
6394         cnt->pool = pool;
6395         /*
6396          * Put the counter back on a list to be updated in non-fallback
6397          * mode. Two lists are used alternately: while one is being
6398          * queried, freed counters are added to the other, selected by the
6399          * pool query_gen value. After the query finishes, that list is
6400          * merged into the global container counter list. The lists are
6401          * switched when a query starts, so the query callback and the
6402          * release function always operate on different lists.
6403          */
6404         if (!priv->sh->cmng.counter_fallback) {
6405                 rte_spinlock_lock(&pool->csl);
6406                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6407                 rte_spinlock_unlock(&pool->csl);
6408         } else {
6409                 cnt->dcs_when_free = cnt->dcs_when_active;
6410                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6411                                            MLX5_COUNTER_TYPE_ORIGIN;
6412                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6413                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6414                                   cnt, next);
6415                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6416         }
6417 }
6418
6419 /**
6420  * Resize a meter id container.
6421  *
6422  * @param[in] dev
6423  *   Pointer to the Ethernet device structure.
6424  *
6425  * @return
6426  *   0 on success, otherwise negative errno value and rte_errno is set.
6427  */
6428 static int
6429 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6430 {
6431         struct mlx5_priv *priv = dev->data->dev_private;
6432         struct mlx5_aso_mtr_pools_mng *pools_mng =
6433                                 &priv->sh->mtrmng->pools_mng;
6434         void *old_pools = pools_mng->pools;
6435         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6436         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6437         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6438
6439         if (!pools) {
6440                 rte_errno = ENOMEM;
6441                 return -ENOMEM;
6442         }
6443         if (!pools_mng->n &&
6444             mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6445                 mlx5_free(pools);
6446                 return -ENOMEM;
6447         }
6448         if (old_pools)
6449                 memcpy(pools, old_pools, pools_mng->n *
6450                                        sizeof(struct mlx5_aso_mtr_pool *));
6451         pools_mng->n = resize;
6452         pools_mng->pools = pools;
6453         if (old_pools)
6454                 mlx5_free(old_pools);
6455         return 0;
6456 }
6457
6458 /**
6459  * Prepare a new meter and/or a new meter pool.
6460  *
6461  * @param[in] dev
6462  *   Pointer to the Ethernet device structure.
6463  * @param[out] mtr_free
6464  *   Where to put the pointer of a new meter.
6465  *
6466  * @return
6467  *   The meter pool pointer and @p mtr_free is set on success,
6468  *   NULL otherwise and rte_errno is set.
6469  */
6470 static struct mlx5_aso_mtr_pool *
6471 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6472                              struct mlx5_aso_mtr **mtr_free)
6473 {
6474         struct mlx5_priv *priv = dev->data->dev_private;
6475         struct mlx5_aso_mtr_pools_mng *pools_mng =
6476                                 &priv->sh->mtrmng->pools_mng;
6477         struct mlx5_aso_mtr_pool *pool = NULL;
6478         struct mlx5_devx_obj *dcs = NULL;
6479         uint32_t i;
6480         uint32_t log_obj_size;
6481
6482         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6483         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6484                         priv->sh->pdn, log_obj_size);
6485         if (!dcs) {
6486                 rte_errno = ENODATA;
6487                 return NULL;
6488         }
6489         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6490         if (!pool) {
6491                 rte_errno = ENOMEM;
6492                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6493                 return NULL;
6494         }
6495         pool->devx_obj = dcs;
6496         pool->index = pools_mng->n_valid;
6497         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6498                 mlx5_free(pool);
6499                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6500                 return NULL;
6501         }
6502         pools_mng->pools[pool->index] = pool;
6503         pools_mng->n_valid++;
6504         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6505                 pool->mtrs[i].offset = i;
6506                 LIST_INSERT_HEAD(&pools_mng->meters,
6507                                                 &pool->mtrs[i], next);
6508         }
6509         pool->mtrs[0].offset = 0;
6510         *mtr_free = &pool->mtrs[0];
6511         return pool;
6512 }
6513
6514 /**
6515  * Release a flow meter into pool.
6516  *
6517  * @param[in] dev
6518  *   Pointer to the Ethernet device structure.
6519  * @param[in] mtr_idx
6520  *   Index to the ASO flow meter.
6521  */
6522 static void
6523 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6524 {
6525         struct mlx5_priv *priv = dev->data->dev_private;
6526         struct mlx5_aso_mtr_pools_mng *pools_mng =
6527                                 &priv->sh->mtrmng->pools_mng;
6528         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6529
6530         MLX5_ASSERT(aso_mtr);
6531         rte_spinlock_lock(&pools_mng->mtrsl);
6532         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6533         aso_mtr->state = ASO_METER_FREE;
6534         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6535         rte_spinlock_unlock(&pools_mng->mtrsl);
6536 }
6537
6538 /**
6539  * Allocate an ASO flow meter.
6540  *
6541  * @param[in] dev
6542  *   Pointer to the Ethernet device structure.
6543  *
6544  * @return
6545  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6546  */
6547 static uint32_t
6548 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6549 {
6550         struct mlx5_priv *priv = dev->data->dev_private;
6551         struct mlx5_aso_mtr *mtr_free = NULL;
6552         struct mlx5_aso_mtr_pools_mng *pools_mng =
6553                                 &priv->sh->mtrmng->pools_mng;
6554         struct mlx5_aso_mtr_pool *pool;
6555         uint32_t mtr_idx = 0;
6556
6557         if (!priv->config.devx) {
6558                 rte_errno = ENOTSUP;
6559                 return 0;
6560         }
6561         /* Get a free meter from the pools management free list. */
6563         rte_spinlock_lock(&pools_mng->mtrsl);
6564         mtr_free = LIST_FIRST(&pools_mng->meters);
6565         if (mtr_free)
6566                 LIST_REMOVE(mtr_free, next);
6567         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6568                 rte_spinlock_unlock(&pools_mng->mtrsl);
6569                 return 0;
6570         }
6571         mtr_free->state = ASO_METER_WAIT;
6572         rte_spinlock_unlock(&pools_mng->mtrsl);
6573         pool = container_of(mtr_free,
6574                         struct mlx5_aso_mtr_pool,
6575                         mtrs[mtr_free->offset]);
6576         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6577         if (!mtr_free->fm.meter_action) {
6578 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6579                 struct rte_flow_error error;
6580                 uint8_t reg_id;
6581
6582                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6583                 mtr_free->fm.meter_action =
6584                         mlx5_glue->dv_create_flow_action_aso
6585                                                 (priv->sh->rx_domain,
6586                                                  pool->devx_obj->obj,
6587                                                  mtr_free->offset,
6588                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6589                                                  reg_id - REG_C_0);
6590 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6591                 if (!mtr_free->fm.meter_action) {
6592                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6593                         return 0;
6594                 }
6595         }
6596         return mtr_idx;
6597 }
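
/*
 * Illustrative sketch, not part of the driver: an allocate-and-release round
 * trip over the ASO meter pool, using only functions defined earlier in this
 * file; example_mtr_roundtrip is a hypothetical name. rte_errno is typically
 * set when the allocation fails.
 */
static void __rte_unused
example_mtr_roundtrip(struct rte_eth_dev *dev)
{
        uint32_t mtr_idx = flow_dv_mtr_alloc(dev);

        if (!mtr_idx)
                return;
        /* ... program the meter parameters, bind it to flow rules ... */
        flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
}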
6598
6599 /**
6600  * Verify the @p attributes will be correctly understood by the NIC and
6601  * are valid for the device configuration.
6602  *
6603  * @param[in] dev
6604  *   Pointer to dev struct.
6605  * @param[in] attributes
6606  *   Pointer to flow attributes
6607  * @param[in] grp_info
6608  *   Pointer to the group translation attributes.
6609  * @param[out] error
6610  *   Pointer to error structure.
6611  *
6612  * @return
6613  *   - 0 on success and non-root table.
6614  *   - 1 on success and root table.
6615  *   - a negative errno value otherwise and rte_errno is set.
6616  */
6617 static int
6618 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6619                             const struct mlx5_flow_tunnel *tunnel,
6620                             const struct rte_flow_attr *attributes,
6621                             const struct flow_grp_info *grp_info,
6622                             struct rte_flow_error *error)
6623 {
6624         struct mlx5_priv *priv = dev->data->dev_private;
6625         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6626         int ret = 0;
6627
6628 #ifndef HAVE_MLX5DV_DR
6629         RTE_SET_USED(tunnel);
6630         RTE_SET_USED(grp_info);
6631         if (attributes->group)
6632                 return rte_flow_error_set(error, ENOTSUP,
6633                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6634                                           NULL,
6635                                           "groups are not supported");
6636 #else
6637         uint32_t table = 0;
6638
6639         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6640                                        grp_info, error);
6641         if (ret)
6642                 return ret;
6643         if (!table)
6644                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6645 #endif
6646         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6647             attributes->priority > lowest_priority)
6648                 return rte_flow_error_set(error, ENOTSUP,
6649                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6650                                           NULL,
6651                                           "priority out of range");
6652         if (attributes->transfer) {
6653                 if (!priv->config.dv_esw_en)
6654                         return rte_flow_error_set
6655                                 (error, ENOTSUP,
6656                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6657                                  "E-Switch dr is not supported");
6658                 if (!(priv->representor || priv->master))
6659                         return rte_flow_error_set
6660                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6661                                  NULL, "E-Switch configuration can only be"
6662                                  " done by a master or a representor device");
6663                 if (attributes->egress)
6664                         return rte_flow_error_set
6665                                 (error, ENOTSUP,
6666                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6667                                  "egress is not supported");
6668         }
6669         if (!(attributes->egress ^ attributes->ingress))
6670                 return rte_flow_error_set(error, ENOTSUP,
6671                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6672                                           "must specify exactly one of "
6673                                           "ingress or egress");
6674         return ret;
6675 }
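
/*
 * Illustrative sketch, not part of the driver: an attribute block that
 * passes the checks above -- exactly one direction bit set, default
 * priority, group 0 (root table) and no transfer. example_flow_attr is a
 * hypothetical name.
 */
static const struct rte_flow_attr example_flow_attr __rte_unused = {
        .group = 0,
        .priority = 0,
        .ingress = 1,
        .egress = 0,
        .transfer = 0,
};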
6676
6677 static uint16_t
6678 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6679                           const struct rte_flow_item *end)
6680 {
6681         const struct rte_flow_item *item = *head;
6682         uint16_t l3_protocol;
6683
6684         for (; item != end; item++) {
6685                 switch (item->type) {
6686                 default:
6687                         break;
6688                 case RTE_FLOW_ITEM_TYPE_IPV4:
6689                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6690                         goto l3_ok;
6691                 case RTE_FLOW_ITEM_TYPE_IPV6:
6692                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6693                         goto l3_ok;
6694                 case RTE_FLOW_ITEM_TYPE_ETH:
6695                         if (item->mask && item->spec) {
6696                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6697                                                             type, item,
6698                                                             l3_protocol);
6699                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6700                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6701                                         goto l3_ok;
6702                         }
6703                         break;
6704                 case RTE_FLOW_ITEM_TYPE_VLAN:
6705                         if (item->mask && item->spec) {
6706                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6707                                                             inner_type, item,
6708                                                             l3_protocol);
6709                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6710                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6711                                         goto l3_ok;
6712                         }
6713                         break;
6714                 }
6715         }
6716         return 0;
6717 l3_ok:
6718         *head = item;
6719         return l3_protocol;
6720 }
6721
6722 static uint8_t
6723 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6724                           const struct rte_flow_item *end)
6725 {
6726         const struct rte_flow_item *item = *head;
6727         uint8_t l4_protocol;
6728
6729         for (; item != end; item++) {
6730                 switch (item->type) {
6731                 default:
6732                         break;
6733                 case RTE_FLOW_ITEM_TYPE_TCP:
6734                         l4_protocol = IPPROTO_TCP;
6735                         goto l4_ok;
6736                 case RTE_FLOW_ITEM_TYPE_UDP:
6737                         l4_protocol = IPPROTO_UDP;
6738                         goto l4_ok;
6739                 case RTE_FLOW_ITEM_TYPE_IPV4:
6740                         if (item->mask && item->spec) {
6741                                 const struct rte_flow_item_ipv4 *mask, *spec;
6742
6743                                 mask = (typeof(mask))item->mask;
6744                                 spec = (typeof(spec))item->spec;
6745                                 l4_protocol = mask->hdr.next_proto_id &
6746                                               spec->hdr.next_proto_id;
6747                                 if (l4_protocol == IPPROTO_TCP ||
6748                                     l4_protocol == IPPROTO_UDP)
6749                                         goto l4_ok;
6750                         }
6751                         break;
6752                 case RTE_FLOW_ITEM_TYPE_IPV6:
6753                         if (item->mask && item->spec) {
6754                                 const struct rte_flow_item_ipv6 *mask, *spec;
6755                                 mask = (typeof(mask))item->mask;
6756                                 spec = (typeof(spec))item->spec;
6757                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6758                                 if (l4_protocol == IPPROTO_TCP ||
6759                                     l4_protocol == IPPROTO_UDP)
6760                                         goto l4_ok;
6761                         }
6762                         break;
6763                 }
6764         }
6765         return 0;
6766 l4_ok:
6767         *head = item;
6768         return l4_protocol;
6769 }
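
/*
 * Illustrative note: the two locator helpers above scan forward from *head
 * until a protocol can be resolved, either from an explicit L3/L4 item or
 * deduced from an ETH/VLAN EtherType or an IPv4/IPv6 next-protocol field.
 * For the hypothetical pattern below they would return RTE_ETHER_TYPE_IPV4
 * and IPPROTO_UDP respectively.
 */
static const struct rte_flow_item example_l3l4_pattern[] __rte_unused = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};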
6770
6771 static int
6772 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6773                                 const struct rte_flow_item *rule_items,
6774                                 const struct rte_flow_item *integrity_item,
6775                                 struct rte_flow_error *error)
6776 {
6777         struct mlx5_priv *priv = dev->data->dev_private;
6778         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6779         const struct rte_flow_item_integrity *mask = (typeof(mask))
6780                                                      integrity_item->mask;
6781         const struct rte_flow_item_integrity *spec = (typeof(spec))
6782                                                      integrity_item->spec;
6783         uint32_t protocol;
6784
6785         if (!priv->config.hca_attr.pkt_integrity_match)
6786                 return rte_flow_error_set(error, ENOTSUP,
6787                                           RTE_FLOW_ERROR_TYPE_ITEM,
6788                                           integrity_item,
6789                                           "packet integrity integrity_item not supported");
6790         if (!mask)
6791                 mask = &rte_flow_item_integrity_mask;
6792         if (!mlx5_validate_integrity_item(mask))
6793                 return rte_flow_error_set(error, ENOTSUP,
6794                                           RTE_FLOW_ERROR_TYPE_ITEM,
6795                                           integrity_item,
6796                                           "unsupported integrity filter");
6797         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
6798         if (spec->level > 1) {
6799                 if (!tunnel_item)
6800                         return rte_flow_error_set(error, ENOTSUP,
6801                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6802                                                   integrity_item,
6803                                                   "missing tunnel item");
6804                 item = tunnel_item;
6805                 end_item = mlx5_find_end_item(tunnel_item);
6806         } else {
6807                 end_item = tunnel_item ? tunnel_item :
6808                            mlx5_find_end_item(integrity_item);
6809         }
6810         if (mask->l3_ok || mask->ipv4_csum_ok) {
6811                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6812                 if (!protocol)
6813                         return rte_flow_error_set(error, EINVAL,
6814                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6815                                                   integrity_item,
6816                                                   "missing L3 protocol");
6817         }
6818         if (mask->l4_ok || mask->l4_csum_ok) {
6819                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6820                 if (!protocol)
6821                         return rte_flow_error_set(error, EINVAL,
6822                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6823                                                   integrity_item,
6824                                                   "missing L4 protocol");
6825         }
6826         return 0;
6827 }
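
/*
 * Illustrative sketch, not part of the driver: an integrity spec that the
 * validator above accepts when the rule also carries an outer L3/L4 pair
 * such as ETH / IPV4 / TCP; level <= 1 selects the outer headers.
 * example_integrity is a hypothetical name.
 */
static const struct rte_flow_item_integrity example_integrity __rte_unused = {
        .level = 0,
        .l3_ok = 1,
        .l4_ok = 1,
        .ipv4_csum_ok = 1,
        .l4_csum_ok = 1,
};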
6828
6829 /**
6830  * Internal validation function. For validating both actions and items.
6831  *
6832  * @param[in] dev
6833  *   Pointer to the rte_eth_dev structure.
6834  * @param[in] attr
6835  *   Pointer to the flow attributes.
6836  * @param[in] items
6837  *   Pointer to the list of items.
6838  * @param[in] actions
6839  *   Pointer to the list of actions.
6840  * @param[in] external
6841  *   This flow rule is created by a request external to the PMD.
6842  * @param[in] hairpin
6843  *   Number of hairpin TX actions, 0 means classic flow.
6844  * @param[out] error
6845  *   Pointer to the error structure.
6846  *
6847  * @return
6848  *   0 on success, a negative errno value otherwise and rte_errno is set.
6849  */
6850 static int
6851 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6852                  const struct rte_flow_item items[],
6853                  const struct rte_flow_action actions[],
6854                  bool external, int hairpin, struct rte_flow_error *error)
6855 {
6856         int ret;
6857         uint64_t action_flags = 0;
6858         uint64_t item_flags = 0;
6859         uint64_t last_item = 0;
6860         uint8_t next_protocol = 0xff;
6861         uint16_t ether_type = 0;
6862         int actions_n = 0;
6863         uint8_t item_ipv6_proto = 0;
6864         int fdb_mirror_limit = 0;
6865         int modify_after_mirror = 0;
6866         const struct rte_flow_item *geneve_item = NULL;
6867         const struct rte_flow_item *gre_item = NULL;
6868         const struct rte_flow_item *gtp_item = NULL;
6869         const struct rte_flow_action_raw_decap *decap;
6870         const struct rte_flow_action_raw_encap *encap;
6871         const struct rte_flow_action_rss *rss = NULL;
6872         const struct rte_flow_action_rss *sample_rss = NULL;
6873         const struct rte_flow_action_count *sample_count = NULL;
6874         const struct rte_flow_item_tcp nic_tcp_mask = {
6875                 .hdr = {
6876                         .tcp_flags = 0xFF,
6877                         .src_port = RTE_BE16(UINT16_MAX),
6878                         .dst_port = RTE_BE16(UINT16_MAX),
6879                 }
6880         };
6881         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6882                 .hdr = {
6883                         .src_addr =
6884                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6885                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6886                         .dst_addr =
6887                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6888                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6889                         .vtc_flow = RTE_BE32(0xffffffff),
6890                         .proto = 0xff,
6891                         .hop_limits = 0xff,
6892                 },
6893                 .has_frag_ext = 1,
6894         };
6895         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6896                 .hdr = {
6897                         .common = {
6898                                 .u32 =
6899                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6900                                         .type = 0xFF,
6901                                         }).u32),
6902                         },
6903                         .dummy[0] = 0xffffffff,
6904                 },
6905         };
6906         struct mlx5_priv *priv = dev->data->dev_private;
6907         struct mlx5_dev_config *dev_conf = &priv->config;
6908         uint16_t queue_index = 0xFFFF;
6909         const struct rte_flow_item_vlan *vlan_m = NULL;
6910         uint32_t rw_act_num = 0;
6911         uint64_t is_root;
6912         const struct mlx5_flow_tunnel *tunnel;
6913         enum mlx5_tof_rule_type tof_rule_type;
6914         struct flow_grp_info grp_info = {
6915                 .external = !!external,
6916                 .transfer = !!attr->transfer,
6917                 .fdb_def_rule = !!priv->fdb_def_rule,
6918                 .std_tbl_fix = true,
6919         };
6920         const struct rte_eth_hairpin_conf *conf;
6921         const struct rte_flow_item *rule_items = items;
6922         const struct rte_flow_item *port_id_item = NULL;
6923         bool def_policy = false;
6924
6925         if (items == NULL)
6926                 return -1;
6927         tunnel = is_tunnel_offload_active(dev) ?
6928                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6929         if (tunnel) {
6930                 if (priv->representor)
6931                         return rte_flow_error_set
6932                                 (error, ENOTSUP,
6933                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6934                                  NULL, "decap not supported for VF representor");
6935                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6936                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6937                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6938                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6939                                         MLX5_FLOW_ACTION_DECAP;
6940                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6941                                         (dev, attr, tunnel, tof_rule_type);
6942         }
6943         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6944         if (ret < 0)
6945                 return ret;
6946         is_root = (uint64_t)ret;
6947         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6948                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6949                 int type = items->type;
6950
6951                 if (!mlx5_flow_os_item_supported(type))
6952                         return rte_flow_error_set(error, ENOTSUP,
6953                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6954                                                   NULL, "item not supported");
6955                 switch (type) {
6956                 case RTE_FLOW_ITEM_TYPE_VOID:
6957                         break;
6958                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6959                         ret = flow_dv_validate_item_port_id
6960                                         (dev, items, attr, item_flags, error);
6961                         if (ret < 0)
6962                                 return ret;
6963                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6964                         port_id_item = items;
6965                         break;
6966                 case RTE_FLOW_ITEM_TYPE_ETH:
6967                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6968                                                           true, error);
6969                         if (ret < 0)
6970                                 return ret;
6971                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6972                                              MLX5_FLOW_LAYER_OUTER_L2;
6973                         if (items->mask != NULL && items->spec != NULL) {
6974                                 ether_type =
6975                                         ((const struct rte_flow_item_eth *)
6976                                          items->spec)->type;
6977                                 ether_type &=
6978                                         ((const struct rte_flow_item_eth *)
6979                                          items->mask)->type;
6980                                 ether_type = rte_be_to_cpu_16(ether_type);
6981                         } else {
6982                                 ether_type = 0;
6983                         }
6984                         break;
6985                 case RTE_FLOW_ITEM_TYPE_VLAN:
6986                         ret = flow_dv_validate_item_vlan(items, item_flags,
6987                                                          dev, error);
6988                         if (ret < 0)
6989                                 return ret;
6990                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6991                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6992                         if (items->mask != NULL && items->spec != NULL) {
6993                                 ether_type =
6994                                         ((const struct rte_flow_item_vlan *)
6995                                          items->spec)->inner_type;
6996                                 ether_type &=
6997                                         ((const struct rte_flow_item_vlan *)
6998                                          items->mask)->inner_type;
6999                                 ether_type = rte_be_to_cpu_16(ether_type);
7000                         } else {
7001                                 ether_type = 0;
7002                         }
7003                         /* Store outer VLAN mask for of_push_vlan action. */
7004                         if (!tunnel)
7005                                 vlan_m = items->mask;
7006                         break;
7007                 case RTE_FLOW_ITEM_TYPE_IPV4:
7008                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7009                                                   &item_flags, &tunnel);
7010                         ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
7011                                                          last_item, ether_type,
7012                                                          error);
7013                         if (ret < 0)
7014                                 return ret;
7015                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7016                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7017                         if (items->mask != NULL &&
7018                             ((const struct rte_flow_item_ipv4 *)
7019                              items->mask)->hdr.next_proto_id) {
7020                                 next_protocol =
7021                                         ((const struct rte_flow_item_ipv4 *)
7022                                          (items->spec))->hdr.next_proto_id;
7023                                 next_protocol &=
7024                                         ((const struct rte_flow_item_ipv4 *)
7025                                          (items->mask))->hdr.next_proto_id;
7026                         } else {
7027                                 /* Reset for inner layer. */
7028                                 next_protocol = 0xff;
7029                         }
7030                         break;
7031                 case RTE_FLOW_ITEM_TYPE_IPV6:
7032                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7033                                                   &item_flags, &tunnel);
7034                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
7035                                                            last_item,
7036                                                            ether_type,
7037                                                            &nic_ipv6_mask,
7038                                                            error);
7039                         if (ret < 0)
7040                                 return ret;
7041                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7042                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7043                         if (items->mask != NULL &&
7044                             ((const struct rte_flow_item_ipv6 *)
7045                              items->mask)->hdr.proto) {
7046                                 item_ipv6_proto =
7047                                         ((const struct rte_flow_item_ipv6 *)
7048                                          items->spec)->hdr.proto;
7049                                 next_protocol =
7050                                         ((const struct rte_flow_item_ipv6 *)
7051                                          items->spec)->hdr.proto;
7052                                 next_protocol &=
7053                                         ((const struct rte_flow_item_ipv6 *)
7054                                          items->mask)->hdr.proto;
7055                         } else {
7056                                 /* Reset for inner layer. */
7057                                 next_protocol = 0xff;
7058                         }
7059                         break;
7060                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7061                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
7062                                                                   item_flags,
7063                                                                   error);
7064                         if (ret < 0)
7065                                 return ret;
7066                         last_item = tunnel ?
7067                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7068                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7069                         if (items->mask != NULL &&
7070                             ((const struct rte_flow_item_ipv6_frag_ext *)
7071                              items->mask)->hdr.next_header) {
7072                                 next_protocol =
7073                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7074                                  items->spec)->hdr.next_header;
7075                                 next_protocol &=
7076                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7077                                  items->mask)->hdr.next_header;
7078                         } else {
7079                                 /* Reset for inner layer. */
7080                                 next_protocol = 0xff;
7081                         }
7082                         break;
7083                 case RTE_FLOW_ITEM_TYPE_TCP:
7084                         ret = mlx5_flow_validate_item_tcp
7085                                                 (items, item_flags,
7086                                                  next_protocol,
7087                                                  &nic_tcp_mask,
7088                                                  error);
7089                         if (ret < 0)
7090                                 return ret;
7091                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7092                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7093                         break;
7094                 case RTE_FLOW_ITEM_TYPE_UDP:
7095                         ret = mlx5_flow_validate_item_udp(items, item_flags,
7096                                                           next_protocol,
7097                                                           error);
7098                         if (ret < 0)
7099                                 return ret;
7100                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7101                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7102                         break;
7103                 case RTE_FLOW_ITEM_TYPE_GRE:
7104                         ret = mlx5_flow_validate_item_gre(items, item_flags,
7105                                                           next_protocol, error);
7106                         if (ret < 0)
7107                                 return ret;
7108                         gre_item = items;
7109                         last_item = MLX5_FLOW_LAYER_GRE;
7110                         break;
7111                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7112                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7113                                                             next_protocol,
7114                                                             error);
7115                         if (ret < 0)
7116                                 return ret;
7117                         last_item = MLX5_FLOW_LAYER_NVGRE;
7118                         break;
7119                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7120                         ret = mlx5_flow_validate_item_gre_key
7121                                 (items, item_flags, gre_item, error);
7122                         if (ret < 0)
7123                                 return ret;
7124                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7125                         break;
7126                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7127                         ret = mlx5_flow_validate_item_vxlan(dev, items,
7128                                                             item_flags, attr,
7129                                                             error);
7130                         if (ret < 0)
7131                                 return ret;
7132                         last_item = MLX5_FLOW_LAYER_VXLAN;
7133                         break;
7134                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7135                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7136                                                                 item_flags, dev,
7137                                                                 error);
7138                         if (ret < 0)
7139                                 return ret;
7140                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7141                         break;
7142                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7143                         ret = mlx5_flow_validate_item_geneve(items,
7144                                                              item_flags, dev,
7145                                                              error);
7146                         if (ret < 0)
7147                                 return ret;
7148                         geneve_item = items;
7149                         last_item = MLX5_FLOW_LAYER_GENEVE;
7150                         break;
7151                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7152                         ret = mlx5_flow_validate_item_geneve_opt(items,
7153                                                                  last_item,
7154                                                                  geneve_item,
7155                                                                  dev,
7156                                                                  error);
7157                         if (ret < 0)
7158                                 return ret;
7159                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7160                         break;
7161                 case RTE_FLOW_ITEM_TYPE_MPLS:
7162                         ret = mlx5_flow_validate_item_mpls(dev, items,
7163                                                            item_flags,
7164                                                            last_item, error);
7165                         if (ret < 0)
7166                                 return ret;
7167                         last_item = MLX5_FLOW_LAYER_MPLS;
7168                         break;
7169
7170                 case RTE_FLOW_ITEM_TYPE_MARK:
7171                         ret = flow_dv_validate_item_mark(dev, items, attr,
7172                                                          error);
7173                         if (ret < 0)
7174                                 return ret;
7175                         last_item = MLX5_FLOW_ITEM_MARK;
7176                         break;
7177                 case RTE_FLOW_ITEM_TYPE_META:
7178                         ret = flow_dv_validate_item_meta(dev, items, attr,
7179                                                          error);
7180                         if (ret < 0)
7181                                 return ret;
7182                         last_item = MLX5_FLOW_ITEM_METADATA;
7183                         break;
7184                 case RTE_FLOW_ITEM_TYPE_ICMP:
7185                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7186                                                            next_protocol,
7187                                                            error);
7188                         if (ret < 0)
7189                                 return ret;
7190                         last_item = MLX5_FLOW_LAYER_ICMP;
7191                         break;
7192                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7193                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7194                                                             next_protocol,
7195                                                             error);
7196                         if (ret < 0)
7197                                 return ret;
                        item_ipv6_proto = IPPROTO_ICMPV6;
                        last_item = MLX5_FLOW_LAYER_ICMP6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TAG:
                        ret = flow_dv_validate_item_tag(dev, items,
                                                        attr, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_TAG;
                        break;
                case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
                case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
                        break;
                case RTE_FLOW_ITEM_TYPE_GTP:
                        ret = flow_dv_validate_item_gtp(dev, items, item_flags,
                                                        error);
                        if (ret < 0)
                                return ret;
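                        /* Save the GTP item; GTP_PSC validation needs it. */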
                        gtp_item = items;
                        last_item = MLX5_FLOW_LAYER_GTP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        ret = flow_dv_validate_item_gtp_psc(items, last_item,
                                                            gtp_item, attr,
                                                            error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_GTP_PSC;
                        break;
                case RTE_FLOW_ITEM_TYPE_ECPRI:
                        /* Capacity will be checked in the translate stage. */
                        ret = mlx5_flow_validate_item_ecpri(items, item_flags,
                                                            last_item,
                                                            ether_type,
                                                            &nic_ecpri_mask,
                                                            error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_ECPRI;
                        break;
                case RTE_FLOW_ITEM_TYPE_INTEGRITY:
                        if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
                                return rte_flow_error_set
                                        (error, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_ITEM,
                                         NULL, "multiple integrity items not supported");
                        ret = flow_dv_validate_item_integrity(dev, rule_items,
                                                              items, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_INTEGRITY;
                        break;
                case RTE_FLOW_ITEM_TYPE_CONNTRACK:
                        ret = flow_dv_validate_item_aso_ct(dev, items,
                                                           &item_flags, error);
                        if (ret < 0)
                                return ret;
                        break;
                case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
                        /* The tunnel offload item was processed earlier;
                         * list it here as a supported type.
                         */
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
                item_flags |= last_item;
        }
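        /*
         * Pattern validation is done; item_flags now holds the accumulated
         * layer mask that the per-action checks below rely on.
         */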
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                int type = actions->type;
                bool shared_count = false;

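                /*
                 * Generic per-action guards: OS-level support, the global
                 * action budget, and the rule that a meter with a
                 * terminating policy must be the last action in the list.
                 */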
                if (!mlx5_flow_os_action_supported(type))
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions, "too many actions");
                if (action_flags &
                        MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
                        return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "meter action with policy "
                                "must be the last action");
                switch (type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_PORT_ID:
                        ret = flow_dv_validate_action_port_id(dev,
                                                              action_flags,
                                                              actions,
                                                              attr,
                                                              error);
                        if (ret)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        ret = flow_dv_validate_action_flag(dev, action_flags,
                                                           attr, error);
                        if (ret < 0)
                                return ret;
                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
                                /* Count all modify-header actions as one. */
                                if (!(action_flags &
                                      MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                        ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_FLAG |
                                                MLX5_FLOW_ACTION_MARK_EXT;
                                if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                        modify_after_mirror = 1;
                        } else {
                                action_flags |= MLX5_FLOW_ACTION_FLAG;
                                ++actions_n;
                        }
                        rw_act_num += MLX5_ACT_NUM_SET_MARK;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = flow_dv_validate_action_mark(dev, actions,
                                                           action_flags,
                                                           attr, error);
                        if (ret < 0)
                                return ret;
                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
                                /* Count all modify-header actions as one. */
                                if (!(action_flags &
                                      MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                        ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_MARK |
                                                MLX5_FLOW_ACTION_MARK_EXT;
                                if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                        modify_after_mirror = 1;
                        } else {
                                action_flags |= MLX5_FLOW_ACTION_MARK;
                                ++actions_n;
                        }
                        rw_act_num += MLX5_ACT_NUM_SET_MARK;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_META:
                        ret = flow_dv_validate_action_set_meta(dev, actions,
                                                               action_flags,
                                                               attr, error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_SET_META;
                        rw_act_num += MLX5_ACT_NUM_SET_META;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TAG:
                        ret = flow_dv_validate_action_set_tag(dev, actions,
                                                              action_flags,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_SET_TAG;
                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        ret = mlx5_flow_validate_action_drop(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = mlx5_flow_validate_action_queue(actions,
                                                              action_flags, dev,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
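                        /*
                         * Remember the destination queue; the hairpin
                         * checks after the action loop inspect it.
                         */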
                        queue_index = ((const struct rte_flow_action_queue *)
                                                        (actions->conf))->index;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        rss = actions->conf;
                        ret = mlx5_flow_validate_action_rss(actions,
                                                            action_flags, dev,
                                                            attr, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        if (rss && sample_rss &&
                            (sample_rss->level != rss->level ||
                            sample_rss->types != rss->types))
                                return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        NULL,
                                        "Can't use different RSS types "
                                        "or levels in the same flow");
                        if (rss != NULL && rss->queue_num)
                                queue_index = rss->queue[0];
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        ++actions_n;
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
                        ret = mlx5_flow_validate_action_default_miss
                                                (action_flags, attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
                        ++actions_n;
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        shared_count = is_shared_action_count(actions);
                        ret = flow_dv_validate_action_count(dev, shared_count,
                                                            action_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
                        if (flow_dv_validate_action_pop_vlan(dev,
                                                             action_flags,
                                                             actions,
                                                             item_flags, attr,
                                                             error))
                                return -rte_errno;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                        ret = flow_dv_validate_action_push_vlan(dev,
                                                                action_flags,
                                                                vlan_m,
                                                                actions, attr,
                                                                error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                        ret = flow_dv_validate_action_set_vlan_pcp
                                                (action_flags, actions, error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        /* Count PCP with push_vlan command. */
                        action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
                        ret = flow_dv_validate_action_set_vlan_vid
                                                (item_flags, action_flags,
                                                 actions, error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        /* Count VID with push_vlan command. */
                        action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
                        rw_act_num += MLX5_ACT_NUM_MDF_VID;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
                        ret = flow_dv_validate_action_l2_encap(dev,
                                                               action_flags,
                                                               actions, attr,
                                                               error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
                        ret = flow_dv_validate_action_decap(dev, action_flags,
                                                            actions, item_flags,
                                                            attr, error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_DECAP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        ret = flow_dv_validate_action_raw_encap_decap
                                (dev, NULL, actions->conf, attr, &action_flags,
                                 &actions_n, actions, item_flags, error);
                        if (ret < 0)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
                        decap = actions->conf;
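                        /*
                         * Look ahead past any VOID actions: a raw_decap
                         * immediately followed by a raw_encap is validated
                         * as a single header rewrite; otherwise rewind and
                         * validate the decap on its own.
                         */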
                        while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
                                ;
                        if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
                                encap = NULL;
                                actions--;
                        } else {
                                encap = actions->conf;
                        }
                        ret = flow_dv_validate_action_raw_encap_decap
                                           (dev,
                                            decap ? decap : &empty_decap, encap,
                                            attr, &action_flags, &actions_n,
                                            actions, item_flags, error);
                        if (ret < 0)
                                return ret;
                        if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
                            (action_flags & MLX5_FLOW_ACTION_DECAP))
                                modify_after_mirror = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
                        ret = flow_dv_validate_action_modify_mac(action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
                                                MLX5_FLOW_ACTION_SET_MAC_SRC :
                                                MLX5_FLOW_ACTION_SET_MAC_DST;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        /*
                         * Even when the source and destination MAC addresses
                         * overlap in the 4B-aligned header, the convert
                         * function handles them separately, so 4 SW actions
                         * are created; 2 actions are added each time,
                         * regardless of how many address bytes are set.
                         */
                        rw_act_num += MLX5_ACT_NUM_MDF_MAC;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                        ret = flow_dv_validate_action_modify_ipv4(action_flags,
                                                                  actions,
                                                                  item_flags,
                                                                  error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
                                                MLX5_FLOW_ACTION_SET_IPV4_SRC :
                                                MLX5_FLOW_ACTION_SET_IPV4_DST;
                        rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
                        ret = flow_dv_validate_action_modify_ipv6(action_flags,
                                                                  actions,
                                                                  item_flags,
                                                                  error);
                        if (ret < 0)
                                return ret;
                        if (item_ipv6_proto == IPPROTO_ICMPV6)
                                return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions,
                                        "Can't change header "
                                        "with ICMPv6 proto");
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
                                                MLX5_FLOW_ACTION_SET_IPV6_SRC :
                                                MLX5_FLOW_ACTION_SET_IPV6_DST;
                        rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
                        ret = flow_dv_validate_action_modify_tp(action_flags,
                                                                actions,
                                                                item_flags,
                                                                error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
                                                MLX5_FLOW_ACTION_SET_TP_SRC :
                                                MLX5_FLOW_ACTION_SET_TP_DST;
                        rw_act_num += MLX5_ACT_NUM_MDF_PORT;
                        break;
                case RTE_FLOW_ACTION_TYPE_DEC_TTL:
                case RTE_FLOW_ACTION_TYPE_SET_TTL:
                        ret = flow_dv_validate_action_modify_ttl(action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_TTL ?
                                                MLX5_FLOW_ACTION_SET_TTL :
                                                MLX5_FLOW_ACTION_DEC_TTL;
                        rw_act_num += MLX5_ACT_NUM_MDF_TTL;
                        break;
                case RTE_FLOW_ACTION_TYPE_JUMP:
                        ret = flow_dv_validate_action_jump(dev, tunnel, actions,
                                                           action_flags,
                                                           attr, external,
                                                           error);
                        if (ret)
                                return ret;
                        if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
                            fdb_mirror_limit)
                                return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "sample and jump action combination is not supported");
                        ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_JUMP;
                        break;
                case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
                case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
                        ret = flow_dv_validate_action_modify_tcp_seq
                                                                (action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
                                                MLX5_FLOW_ACTION_INC_TCP_SEQ :
                                                MLX5_FLOW_ACTION_DEC_TCP_SEQ;
                        rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
                        break;
                case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
                case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
                        ret = flow_dv_validate_action_modify_tcp_ack
                                                                (action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
                                                MLX5_FLOW_ACTION_INC_TCP_ACK :
                                                MLX5_FLOW_ACTION_DEC_TCP_ACK;
                        rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
                case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_METER:
                        ret = mlx5_flow_validate_action_meter(dev,
                                                              action_flags,
                                                              actions, attr,
                                                              port_id_item,
                                                              &def_policy,
                                                              error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_METER;
                        if (!def_policy)
                                action_flags |=
                                MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
                        ++actions_n;
                        /* Meter action will add one more TAG action. */
                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
                        if (!attr->transfer && !attr->group)
                                return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                        "Shared ASO age action is not "
                                        "supported for group 0");
                        if (action_flags & MLX5_FLOW_ACTION_AGE)
                                return rte_flow_error_set
                                                  (error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   NULL,
                                                   "duplicate age actions set");
                        action_flags |= MLX5_FLOW_ACTION_AGE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_AGE:
                        ret = flow_dv_validate_action_age(action_flags,
                                                          actions, dev,
                                                          error);
                        if (ret < 0)
                                return ret;
                        /*
                         * Validate that the regular AGE action (using a
                         * counter) is mutually exclusive with shared
                         * counter actions.
                         */
                        if (!priv->sh->flow_hit_aso_en) {
                                if (shared_count)
                                        return rte_flow_error_set
                                                (error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                NULL,
                                                "old age and shared count combination is not supported");
                                if (sample_count)
                                        return rte_flow_error_set
                                                (error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                NULL,
                                                "old age action and count must be in the same sub flow");
                        }
                        action_flags |= MLX5_FLOW_ACTION_AGE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
                        ret = flow_dv_validate_action_modify_ipv4_dscp
                                                         (action_flags,
                                                          actions,
                                                          item_flags,
                                                          error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
                        rw_act_num += MLX5_ACT_NUM_SET_DSCP;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
                        ret = flow_dv_validate_action_modify_ipv6_dscp
                                                                (action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
                        rw_act_num += MLX5_ACT_NUM_SET_DSCP;
                        break;
                case RTE_FLOW_ACTION_TYPE_SAMPLE:
                        ret = flow_dv_validate_action_sample(&action_flags,
                                                             actions, dev,
                                                             attr, item_flags,
                                                             rss, &sample_rss,
                                                             &sample_count,
                                                             &fdb_mirror_limit,
                                                             error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_SAMPLE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
                        ret = flow_dv_validate_action_modify_field(dev,
                                                                   action_flags,
                                                                   actions,
                                                                   attr,
                                                                   error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
                        rw_act_num += ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_CONNTRACK:
                        ret = flow_dv_validate_action_aso_ct(dev, action_flags,
                                                             item_flags, attr,
                                                             error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_CT;
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
                        /* The tunnel offload action was processed earlier;
                         * list it here as a supported type.
                         */
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        /*
         * Validate actions in tunnel offload flow rules:
         * - Explicit decap action is prohibited by the tunnel offload API.
         * - Drop action in tunnel steer rule is prohibited by the API.
         * - Application cannot use MARK action because its value can mask
         *   tunnel default miss notification.
         * - JUMP in tunnel match rule has no support in current PMD
         *   implementation.
         * - TAG & META are reserved for future uses.
         */
        if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
                uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
                                            MLX5_FLOW_ACTION_MARK     |
                                            MLX5_FLOW_ACTION_SET_TAG  |
                                            MLX5_FLOW_ACTION_SET_META |
                                            MLX5_FLOW_ACTION_DROP;

                if (action_flags & bad_actions_mask)
                        return rte_flow_error_set
                                        (error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Invalid RTE action in tunnel "
                                        "set decap rule");
                if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
                        return rte_flow_error_set
                                        (error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "tunnel set decap rule must terminate "
                                        "with JUMP");
                if (!attr->ingress)
                        return rte_flow_error_set
                                        (error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "tunnel flows for ingress traffic only");
        }
        if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
                uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
                                            MLX5_FLOW_ACTION_MARK    |
                                            MLX5_FLOW_ACTION_SET_TAG |
                                            MLX5_FLOW_ACTION_SET_META;

                if (action_flags & bad_actions_mask)
                        return rte_flow_error_set
                                        (error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Invalid RTE action in tunnel "
                                        "set match rule");
        }
        /*
         * Validate the drop action mutual exclusion with other actions.
         * Drop action is mutually-exclusive with any other action, except for
         * Count action.
         * Drop action compatibility with tunnel offload was already validated.
         */
        if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
                            MLX5_FLOW_ACTION_TUNNEL_SET));
        else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
            (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "Drop action is mutually-exclusive "
                                          "with any other action, except for "
                                          "Count action");
        /* E-Switch has a few restrictions on using items and actions. */
        if (attr->transfer) {
                if (!mlx5_flow_ext_mreg_supported(dev) &&
                    action_flags & MLX5_FLOW_ACTION_FLAG)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action FLAG");
                if (!mlx5_flow_ext_mreg_supported(dev) &&
                    action_flags & MLX5_FLOW_ACTION_MARK)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action MARK");
                if (action_flags & MLX5_FLOW_ACTION_QUEUE)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action QUEUE");
                if (action_flags & MLX5_FLOW_ACTION_RSS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action RSS");
                if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "no fate action is found");
        } else {
                if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "no fate action is found");
        }
        /*
         * Continue validation for Xcap and VLAN actions.
         * If hairpin works in explicit TX rule mode, there is no action
         * splitting and the validation of a hairpin ingress flow is the
         * same as for other standard flows.
         */
        if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
                             MLX5_FLOW_VLAN_ACTIONS)) &&
            (queue_index == 0xFFFF ||
             mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
             ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
             conf->tx_explicit != 0))) {
                if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
                    MLX5_FLOW_XCAP_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "encap and decap "
                                                  "combination is not supported");
                if (!attr->transfer && attr->ingress) {
                        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
                                return rte_flow_error_set
                                                (error, ENOTSUP,
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                                 NULL, "encap is not supported"
                                                 " for ingress traffic");
                        else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
                                return rte_flow_error_set
                                                (error, ENOTSUP,
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                                 NULL, "push VLAN action not "
                                                 "supported for ingress");
                        else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
                                        MLX5_FLOW_VLAN_ACTIONS)
                                return rte_flow_error_set
                                                (error, ENOTSUP,
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                                 NULL, "no support for "
                                                 "multiple VLAN actions");
                }
        }
        if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
                if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
                        ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
                        attr->ingress)
                        return rte_flow_error_set
                                (error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "fate action not supported for "
                                "meter with policy");
                if (attr->egress) {
                        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                                return rte_flow_error_set
                                        (error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        NULL, "modify header action in egress "
                                        "cannot be done before meter action");
                        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
                                return rte_flow_error_set
                                        (error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        NULL, "encap action in egress "
                                        "cannot be done before meter action");
                        if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
                                return rte_flow_error_set
                                        (error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        NULL, "push vlan action in egress "
                                        "cannot be done before meter action");
                }
        }
        /*
         * Hairpin flow will add one more TAG action in TX implicit mode.
         * In TX explicit mode, there will be no hairpin flow ID.
         */
        if (hairpin > 0)
                rw_act_num += MLX5_ACT_NUM_SET_TAG;
        /* Extra metadata enabled: one more TAG action will be added. */
        if (dev_conf->dv_flow_en &&
            dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
            mlx5_flow_ext_mreg_supported(dev))
                rw_act_num += MLX5_ACT_NUM_SET_TAG;
        if (rw_act_num >
                        flow_dv_modify_hdr_action_max(dev, is_root)) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "too many header modify"
                                          " actions to support");
        }
        /* E-Switch egress mirror plus modify flow has a limitation on CX5. */
        if (fdb_mirror_limit && modify_after_mirror)
                return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                "sample before modify action is not supported");
        return 0;
}
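
/*
 * Note on the modify-header budget above: each modify-header action adds a
 * fixed number of hardware commands (the MLX5_ACT_NUM_x constants) to
 * rw_act_num, and the total is checked once against
 * flow_dv_modify_hdr_action_max() rather than per action.
 */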

/**
 * Internal preparation function. Allocates the DV flow size;
 * this size is constant.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr __rte_unused,
                const struct rte_flow_item items[] __rte_unused,
                const struct rte_flow_action actions[] __rte_unused,
                struct rte_flow_error *error)
{
        uint32_t handle_idx = 0;
        struct mlx5_flow *dev_flow;
        struct mlx5_flow_handle *dev_handle;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

        MLX5_ASSERT(wks);
        wks->skip_matcher_reg = 0;
        wks->policy = NULL;
        wks->final_policy = NULL;
        /* Avoid overflowing the pre-allocated temporary flow array. */
        if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
                rte_flow_error_set(error, ENOSPC,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "no free temporary device flow");
                return NULL;
        }
        dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
                                   &handle_idx);
        if (!dev_handle) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "not enough memory to create flow handle");
                return NULL;
        }
        MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
        dev_flow = &wks->flows[wks->flow_idx++];
        memset(dev_flow, 0, sizeof(*dev_flow));
        dev_flow->handle = dev_handle;
        dev_flow->handle_idx = handle_idx;
        dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
        dev_flow->ingress = attr->ingress;
        dev_flow->dv.transfer = attr->transfer;
        return dev_flow;
}
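
/*
 * A minimal usage sketch (illustrative assumption, not a fixed contract):
 * the generic flow layer is expected to take one workspace slot per
 * sub-flow, e.g.:
 *
 *   struct mlx5_flow *dev_flow;
 *
 *   dev_flow = flow_dv_prepare(dev, attr, items, actions, error);
 *   if (!dev_flow)
 *           return NULL; // error and rte_errno were already set.
 */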

#ifdef RTE_LIBRTE_MLX5_DEBUG
/**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * the kernel driver. If an unmasked bit is present in the value, it
 * returns failure.
 *
 * @param match_mask
 *   pointer to match mask buffer.
 * @param match_value
 *   pointer to match value buffer.
 *
 * @return
 *   0 if valid, -EINVAL otherwise.
 */
static int
flow_dv_check_valid_spec(void *match_mask, void *match_value)
{
        uint8_t *m = match_mask;
        uint8_t *v = match_value;
        unsigned int i;

        for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
                if (v[i] & ~m[i]) {
                        DRV_LOG(ERR,
                                "match_value differs from match_criteria"
                                " %p[%u] != %p[%u]",
                                match_value, i, match_mask, i);
                        return -EINVAL;
                }
        }
        return 0;
}
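
/*
 * Usage sketch (debug builds only; the buffer names are assumptions):
 * translation code can assert that no value bit escapes its mask before
 * programming the matcher, e.g.:
 *
 *   MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
 *                                         dev_flow->dv.value.buf));
 */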
8127 #endif
8128
8129 /**
8130  * Add match of ip_version.
8131  *
8132  * @param[in] group
8133  *   Flow group.
8134  * @param[in] headers_v
8135  *   Values header pointer.
8136  * @param[in] headers_m
8137  *   Masks header pointer.
8138  * @param[in] ip_version
8139  *   The IP version to set.
8140  */
8141 static inline void
8142 flow_dv_set_match_ip_version(uint32_t group,
8143                              void *headers_v,
8144                              void *headers_m,
8145                              uint8_t ip_version)
8146 {
8147         if (group == 0)
8148                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8149         else
8150                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8151                          ip_version);
8152         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8153         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8154         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8155 }
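
/*
 * Note: for the root table (group 0) the full 4-bit ip_version mask (0xf)
 * is always used, while other tables match with the requested version
 * value as the mask. In both cases the ethertype fields are cleared so
 * that the HW-optimized ip_version match is used instead of an ethertype
 * match (see the comments in the ETH/VLAN translations below).
 */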
8156
8157 /**
8158  * Add Ethernet item to matcher and to the value.
8159  *
8160  * @param[in, out] matcher
8161  *   Flow matcher.
8162  * @param[in, out] key
8163  *   Flow matcher value.
8164  * @param[in] item
8165  *   Flow pattern to translate.
8166  * @param[in] inner
8167  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
8168  */
8169 static void
8170 flow_dv_translate_item_eth(void *matcher, void *key,
8171                            const struct rte_flow_item *item, int inner,
8172                            uint32_t group)
8173 {
8174         const struct rte_flow_item_eth *eth_m = item->mask;
8175         const struct rte_flow_item_eth *eth_v = item->spec;
8176         const struct rte_flow_item_eth nic_mask = {
8177                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8178                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8179                 .type = RTE_BE16(0xffff),
8180                 .has_vlan = 0,
8181         };
8182         void *hdrs_m;
8183         void *hdrs_v;
8184         char *l24_v;
8185         unsigned int i;
8186
8187         if (!eth_v)
8188                 return;
8189         if (!eth_m)
8190                 eth_m = &nic_mask;
8191         if (inner) {
8192                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8193                                          inner_headers);
8194                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8195         } else {
8196                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8197                                          outer_headers);
8198                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8199         }
8200         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8201                &eth_m->dst, sizeof(eth_m->dst));
8202         /* The value must be in the range of the mask. */
8203         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8204         for (i = 0; i < sizeof(eth_m->dst); ++i)
8205                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8206         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8207                &eth_m->src, sizeof(eth_m->src));
8208         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8209         /* The value must be in the range of the mask. */
8210         for (i = 0; i < sizeof(eth_m->src); ++i)
8211                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8212         /*
8213          * HW supports match on one Ethertype, the Ethertype following the last
8214          * VLAN tag of the packet (see PRM).
8215          * Set match on ethertype only if ETH header is not followed by VLAN.
8216          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8217          * ethertype, and use ip_version field instead.
8218          * eCPRI over Ether layer will use type value 0xAEFE.
8219          */
8220         if (eth_m->type == 0xFFFF) {
8221                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8222                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8223                 switch (eth_v->type) {
8224                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8225                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8226                         return;
8227                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8228                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8229                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8230                         return;
8231                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8232                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8233                         return;
8234                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8235                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8236                         return;
8237                 default:
8238                         break;
8239                 }
8240         }
8241         if (eth_m->has_vlan) {
8242                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8243                 if (eth_v->has_vlan) {
8244                         /*
8245                          * Here, when also has_more_vlan field in VLAN item is
8246                          * not set, only single-tagged packets will be matched.
8247                          */
8248                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8249                         return;
8250                 }
8251         }
8252         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8253                  rte_be_to_cpu_16(eth_m->type));
8254         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8255         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8256 }
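
/*
 * Worked example for the ethertype handling above: an ETH item with type
 * mask 0xffff and value 0x0800 (IPv4) is translated into an ip_version
 * match rather than an ethertype match, while value 0x8100 (VLAN) only
 * sets the cvlan_tag bit and leaves the ethertype following the last
 * VLAN tag to a subsequent VLAN item.
 */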
8257
8258 /**
8259  * Add VLAN item to matcher and to the value.
8260  *
8261  * @param[in, out] dev_flow
8262  *   Flow descriptor.
8263  * @param[in, out] matcher
8264  *   Flow matcher.
8265  * @param[in, out] key
8266  *   Flow matcher value.
8267  * @param[in] item
8268  *   Flow pattern to translate.
8269  * @param[in] inner
8270  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
8271  */
8272 static void
8273 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8274                             void *matcher, void *key,
8275                             const struct rte_flow_item *item,
8276                             int inner, uint32_t group)
8277 {
8278         const struct rte_flow_item_vlan *vlan_m = item->mask;
8279         const struct rte_flow_item_vlan *vlan_v = item->spec;
8280         void *hdrs_m;
8281         void *hdrs_v;
8282         uint16_t tci_m;
8283         uint16_t tci_v;
8284
8285         if (inner) {
8286                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8287                                          inner_headers);
8288                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8289         } else {
8290                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8291                                          outer_headers);
8292                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8293                 /*
8294                  * This is a workaround: masks are not supported
8295                  * here and have already been validated.
8296                  */
8297                 if (vlan_v)
8298                         dev_flow->handle->vf_vlan.tag =
8299                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8300         }
8301         /*
8302          * When VLAN item exists in flow, mark packet as tagged,
8303          * even if TCI is not specified.
8304          */
8305         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8306                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8307                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8308         }
8309         if (!vlan_v)
8310                 return;
8311         if (!vlan_m)
8312                 vlan_m = &rte_flow_item_vlan_mask;
8313         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8314         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8315         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8316         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8317         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8318         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8319         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8320         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8321         /*
8322          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8323          * ethertype, and use ip_version field instead.
8324          */
8325         if (vlan_m->inner_type == 0xFFFF) {
8326                 switch (vlan_v->inner_type) {
8327                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8328                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8329                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8330                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8331                         return;
8332                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8333                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8334                         return;
8335                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8336                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8337                         return;
8338                 default:
8339                         break;
8340                 }
8341         }
8342         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8343                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8344                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8345                 /* Only one vlan_tag bit can be set. */
8346                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8347                 return;
8348         }
8349         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8350                  rte_be_to_cpu_16(vlan_m->inner_type));
8351         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8352                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8353 }
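
/*
 * TCI layout behind the shifts above: bits 15-13 carry the priority
 * (PCP), bit 12 the CFI/DEI bit and bits 11-0 the VLAN ID. E.g. a TCI of
 * 0xe00a yields first_prio = 7, first_cfi = 0 and first_vid = 10, since
 * MLX5_SET() truncates each value to its field width.
 */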
8354
8355 /**
8356  * Add IPV4 item to matcher and to the value.
8357  *
8358  * @param[in, out] matcher
8359  *   Flow matcher.
8360  * @param[in, out] key
8361  *   Flow matcher value.
8362  * @param[in] item
8363  *   Flow pattern to translate.
8364  * @param[in] inner
8365  *   Item is inner pattern.
8366  * @param[in] group
8367  *   The group to insert the rule.
8368  */
8369 static void
8370 flow_dv_translate_item_ipv4(void *matcher, void *key,
8371                             const struct rte_flow_item *item,
8372                             int inner, uint32_t group)
8373 {
8374         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8375         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8376         const struct rte_flow_item_ipv4 nic_mask = {
8377                 .hdr = {
8378                         .src_addr = RTE_BE32(0xffffffff),
8379                         .dst_addr = RTE_BE32(0xffffffff),
8380                         .type_of_service = 0xff,
8381                         .next_proto_id = 0xff,
8382                         .time_to_live = 0xff,
8383                 },
8384         };
8385         void *headers_m;
8386         void *headers_v;
8387         char *l24_m;
8388         char *l24_v;
8389         uint8_t tos, ihl_m, ihl_v;
8390
8391         if (inner) {
8392                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8393                                          inner_headers);
8394                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8395         } else {
8396                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8397                                          outer_headers);
8398                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8399         }
8400         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8401         if (!ipv4_v)
8402                 return;
8403         if (!ipv4_m)
8404                 ipv4_m = &nic_mask;
8405         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8406                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8407         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8408                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8409         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8410         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8411         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8412                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8413         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8414                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8415         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8416         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8417         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8418         ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8419         ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8420         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8421         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
8422         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8423                  ipv4_m->hdr.type_of_service);
8424         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8425         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8426                  ipv4_m->hdr.type_of_service >> 2);
8427         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8428         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8429                  ipv4_m->hdr.next_proto_id);
8430         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8431                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8432         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8433                  ipv4_m->hdr.time_to_live);
8434         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8435                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8436         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8437                  !!(ipv4_m->hdr.fragment_offset));
8438         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8439                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8440 }
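
/*
 * The type_of_service byte above is split between two match fields:
 * ip_ecn receives the low two bits and ip_dscp the high six (tos >> 2).
 * E.g. tos = 0xb8 is matched as dscp = 0x2e (EF) with ecn = 0.
 */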
8441
8442 /**
8443  * Add IPV6 item to matcher and to the value.
8444  *
8445  * @param[in, out] matcher
8446  *   Flow matcher.
8447  * @param[in, out] key
8448  *   Flow matcher value.
8449  * @param[in] item
8450  *   Flow pattern to translate.
8451  * @param[in] inner
8452  *   Item is inner pattern.
8453  * @param[in] group
8454  *   The group to insert the rule.
8455  */
8456 static void
8457 flow_dv_translate_item_ipv6(void *matcher, void *key,
8458                             const struct rte_flow_item *item,
8459                             int inner, uint32_t group)
8460 {
8461         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8462         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8463         const struct rte_flow_item_ipv6 nic_mask = {
8464                 .hdr = {
8465                         .src_addr =
8466                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8467                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8468                         .dst_addr =
8469                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8470                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8471                         .vtc_flow = RTE_BE32(0xffffffff),
8472                         .proto = 0xff,
8473                         .hop_limits = 0xff,
8474                 },
8475         };
8476         void *headers_m;
8477         void *headers_v;
8478         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8479         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8480         char *l24_m;
8481         char *l24_v;
8482         uint32_t vtc_m;
8483         uint32_t vtc_v;
8484         int i;
8485         int size;
8486
8487         if (inner) {
8488                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8489                                          inner_headers);
8490                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8491         } else {
8492                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8493                                          outer_headers);
8494                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8495         }
8496         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8497         if (!ipv6_v)
8498                 return;
8499         if (!ipv6_m)
8500                 ipv6_m = &nic_mask;
8501         size = sizeof(ipv6_m->hdr.dst_addr);
8502         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8503                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8504         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8505                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8506         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8507         for (i = 0; i < size; ++i)
8508                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8509         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8510                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8511         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8512                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8513         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8514         for (i = 0; i < size; ++i)
8515                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8516         /* TOS. */
8517         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8518         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8519         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8520         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8521         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8522         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8523         /* Label. */
8524         if (inner) {
8525                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8526                          vtc_m);
8527                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8528                          vtc_v);
8529         } else {
8530                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8531                          vtc_m);
8532                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8533                          vtc_v);
8534         }
8535         /* Protocol. */
8536         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8537                  ipv6_m->hdr.proto);
8538         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8539                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8540         /* Hop limit. */
8541         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8542                  ipv6_m->hdr.hop_limits);
8543         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8544                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8545         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8546                  !!(ipv6_m->has_frag_ext));
8547         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8548                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8549 }
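
/*
 * vtc_flow layout used above: bits 31-28 version, bits 27-20 traffic
 * class (DSCP in 27-22, ECN in 21-20) and bits 19-0 flow label. Hence
 * ip_dscp takes vtc >> 22, ip_ecn takes vtc >> 20, and the flow label
 * fields receive vtc unshifted, truncated to their 20-bit width.
 */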
8550
8551 /**
8552  * Add IPV6 fragment extension item to matcher and to the value.
8553  *
8554  * @param[in, out] matcher
8555  *   Flow matcher.
8556  * @param[in, out] key
8557  *   Flow matcher value.
8558  * @param[in] item
8559  *   Flow pattern to translate.
8560  * @param[in] inner
8561  *   Item is inner pattern.
8562  */
8563 static void
8564 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8565                                      const struct rte_flow_item *item,
8566                                      int inner)
8567 {
8568         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8569         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8570         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8571                 .hdr = {
8572                         .next_header = 0xff,
8573                         .frag_data = RTE_BE16(0xffff),
8574                 },
8575         };
8576         void *headers_m;
8577         void *headers_v;
8578
8579         if (inner) {
8580                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8581                                          inner_headers);
8582                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8583         } else {
8584                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8585                                          outer_headers);
8586                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8587         }
8588         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8589         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8590         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8591         if (!ipv6_frag_ext_v)
8592                 return;
8593         if (!ipv6_frag_ext_m)
8594                 ipv6_frag_ext_m = &nic_mask;
8595         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8596                  ipv6_frag_ext_m->hdr.next_header);
8597         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8598                  ipv6_frag_ext_v->hdr.next_header &
8599                  ipv6_frag_ext_m->hdr.next_header);
8600 }
8601
8602 /**
8603  * Add TCP item to matcher and to the value.
8604  *
8605  * @param[in, out] matcher
8606  *   Flow matcher.
8607  * @param[in, out] key
8608  *   Flow matcher value.
8609  * @param[in] item
8610  *   Flow pattern to translate.
8611  * @param[in] inner
8612  *   Item is inner pattern.
8613  */
8614 static void
8615 flow_dv_translate_item_tcp(void *matcher, void *key,
8616                            const struct rte_flow_item *item,
8617                            int inner)
8618 {
8619         const struct rte_flow_item_tcp *tcp_m = item->mask;
8620         const struct rte_flow_item_tcp *tcp_v = item->spec;
8621         void *headers_m;
8622         void *headers_v;
8623
8624         if (inner) {
8625                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8626                                          inner_headers);
8627                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8628         } else {
8629                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8630                                          outer_headers);
8631                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8632         }
8633         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8634         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8635         if (!tcp_v)
8636                 return;
8637         if (!tcp_m)
8638                 tcp_m = &rte_flow_item_tcp_mask;
8639         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8640                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8641         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8642                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8643         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8644                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8645         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8646                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8647         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8648                  tcp_m->hdr.tcp_flags);
8649         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8650                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8651 }
8652
8653 /**
8654  * Add UDP item to matcher and to the value.
8655  *
8656  * @param[in, out] matcher
8657  *   Flow matcher.
8658  * @param[in, out] key
8659  *   Flow matcher value.
8660  * @param[in] item
8661  *   Flow pattern to translate.
8662  * @param[in] inner
8663  *   Item is inner pattern.
8664  */
8665 static void
8666 flow_dv_translate_item_udp(void *matcher, void *key,
8667                            const struct rte_flow_item *item,
8668                            int inner)
8669 {
8670         const struct rte_flow_item_udp *udp_m = item->mask;
8671         const struct rte_flow_item_udp *udp_v = item->spec;
8672         void *headers_m;
8673         void *headers_v;
8674
8675         if (inner) {
8676                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8677                                          inner_headers);
8678                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8679         } else {
8680                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8681                                          outer_headers);
8682                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8683         }
8684         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8685         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8686         if (!udp_v)
8687                 return;
8688         if (!udp_m)
8689                 udp_m = &rte_flow_item_udp_mask;
8690         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8691                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8692         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8693                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8694         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8695                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8696         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8697                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8698 }
8699
8700 /**
8701  * Add GRE optional Key item to matcher and to the value.
8702  *
8703  * @param[in, out] matcher
8704  *   Flow matcher.
8705  * @param[in, out] key
8706  *   Flow matcher value.
8707  * @param[in] item
8708  *   Flow pattern to translate.
8711  */
8712 static void
8713 flow_dv_translate_item_gre_key(void *matcher, void *key,
8714                                    const struct rte_flow_item *item)
8715 {
8716         const rte_be32_t *key_m = item->mask;
8717         const rte_be32_t *key_v = item->spec;
8718         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8719         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8720         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8721
8722         /* GRE K bit must be on and should already be validated. */
8723         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8724         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8725         if (!key_v)
8726                 return;
8727         if (!key_m)
8728                 key_m = &gre_key_default_mask;
8729         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8730                  rte_be_to_cpu_32(*key_m) >> 8);
8731         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8732                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8733         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8734                  rte_be_to_cpu_32(*key_m) & 0xFF);
8735         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8736                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8737 }
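
/*
 * The 32-bit GRE key above is split across two match fields: gre_key_h
 * holds the upper 24 bits and gre_key_l the lower 8 bits. E.g. key
 * 0x12345678 is matched as gre_key_h = 0x123456 and gre_key_l = 0x78.
 */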
8738
8739 /**
8740  * Add GRE item to matcher and to the value.
8741  *
8742  * @param[in, out] matcher
8743  *   Flow matcher.
8744  * @param[in, out] key
8745  *   Flow matcher value.
8746  * @param[in] item
8747  *   Flow pattern to translate.
8748  * @param[in] inner
8749  *   Item is inner pattern.
8750  */
8751 static void
8752 flow_dv_translate_item_gre(void *matcher, void *key,
8753                            const struct rte_flow_item *item,
8754                            int inner)
8755 {
8756         const struct rte_flow_item_gre *gre_m = item->mask;
8757         const struct rte_flow_item_gre *gre_v = item->spec;
8758         void *headers_m;
8759         void *headers_v;
8760         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8761         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8762         struct {
8763                 union {
8764                         __extension__
8765                         struct {
8766                                 uint16_t version:3;
8767                                 uint16_t rsvd0:9;
8768                                 uint16_t s_present:1;
8769                                 uint16_t k_present:1;
8770                                 uint16_t rsvd_bit1:1;
8771                                 uint16_t c_present:1;
8772                         };
8773                         uint16_t value;
8774                 };
8775         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8776
8777         if (inner) {
8778                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8779                                          inner_headers);
8780                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8781         } else {
8782                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8783                                          outer_headers);
8784                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8785         }
8786         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8787         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8788         if (!gre_v)
8789                 return;
8790         if (!gre_m)
8791                 gre_m = &rte_flow_item_gre_mask;
8792         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8793                  rte_be_to_cpu_16(gre_m->protocol));
8794         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8795                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8796         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8797         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8798         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8799                  gre_crks_rsvd0_ver_m.c_present);
8800         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8801                  gre_crks_rsvd0_ver_v.c_present &
8802                  gre_crks_rsvd0_ver_m.c_present);
8803         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8804                  gre_crks_rsvd0_ver_m.k_present);
8805         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8806                  gre_crks_rsvd0_ver_v.k_present &
8807                  gre_crks_rsvd0_ver_m.k_present);
8808         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8809                  gre_crks_rsvd0_ver_m.s_present);
8810         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8811                  gre_crks_rsvd0_ver_v.s_present &
8812                  gre_crks_rsvd0_ver_m.s_present);
8813 }
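
/*
 * Note on the anonymous union above: after rte_be_to_cpu_16() the GRE
 * flag bits land on C (bit 15), K (bit 13) and S (bit 12) as defined by
 * RFC 2784/2890; the bitfield declaration assumes the usual little-endian
 * bit allocation, matching the rest of this driver.
 */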
8814
8815 /**
8816  * Add NVGRE item to matcher and to the value.
8817  *
8818  * @param[in, out] matcher
8819  *   Flow matcher.
8820  * @param[in, out] key
8821  *   Flow matcher value.
8822  * @param[in] item
8823  *   Flow pattern to translate.
8824  * @param[in] inner
8825  *   Item is inner pattern.
8826  */
8827 static void
8828 flow_dv_translate_item_nvgre(void *matcher, void *key,
8829                              const struct rte_flow_item *item,
8830                              int inner)
8831 {
8832         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8833         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8834         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8835         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8836         const char *tni_flow_id_m;
8837         const char *tni_flow_id_v;
8838         char *gre_key_m;
8839         char *gre_key_v;
8840         int size;
8841         int i;
8842
8843         /* For NVGRE, GRE header fields must be set with defined values. */
8844         const struct rte_flow_item_gre gre_spec = {
8845                 .c_rsvd0_ver = RTE_BE16(0x2000),
8846                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8847         };
8848         const struct rte_flow_item_gre gre_mask = {
8849                 .c_rsvd0_ver = RTE_BE16(0xB000),
8850                 .protocol = RTE_BE16(UINT16_MAX),
8851         };
8852         const struct rte_flow_item gre_item = {
8853                 .spec = &gre_spec,
8854                 .mask = &gre_mask,
8855                 .last = NULL,
8856         };
8857         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8858         if (!nvgre_v)
8859                 return;
8860         if (!nvgre_m)
8861                 nvgre_m = &rte_flow_item_nvgre_mask;
8862         tni_flow_id_m = (const char *)nvgre_m->tni;
8863         tni_flow_id_v = (const char *)nvgre_v->tni;
8864         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8865         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8866         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8867         memcpy(gre_key_m, tni_flow_id_m, size);
8868         for (i = 0; i < size; ++i)
8869                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8870 }
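
/*
 * NVGRE reuses the GRE key match fields: the 24-bit TNI and the 8-bit
 * flow_id are copied back to back over gre_key_h/gre_key_l, so TNI
 * 0xabcdef with flow_id 0x01 matches like GRE key 0xabcdef01.
 */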
8871
8872 /**
8873  * Add VXLAN item to matcher and to the value.
8874  *
8875  * @param[in] dev
8876  *   Pointer to the Ethernet device structure.
8877  * @param[in] attr
8878  *   Flow rule attributes.
8879  * @param[in, out] matcher
8880  *   Flow matcher.
8881  * @param[in, out] key
8882  *   Flow matcher value.
8883  * @param[in] item
8884  *   Flow pattern to translate.
8885  * @param[in] inner
8886  *   Item is inner pattern.
8887  */
8888 static void
8889 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8890                              const struct rte_flow_attr *attr,
8891                              void *matcher, void *key,
8892                              const struct rte_flow_item *item,
8893                              int inner)
8894 {
8895         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8896         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8897         void *headers_m;
8898         void *headers_v;
8899         void *misc5_m;
8900         void *misc5_v;
8901         uint32_t *tunnel_header_v;
8902         uint32_t *tunnel_header_m;
8903         uint16_t dport;
8904         struct mlx5_priv *priv = dev->data->dev_private;
8905         const struct rte_flow_item_vxlan nic_mask = {
8906                 .vni = "\xff\xff\xff",
8907                 .rsvd1 = 0xff,
8908         };
8909
8910         if (inner) {
8911                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8912                                          inner_headers);
8913                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8914         } else {
8915                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8916                                          outer_headers);
8917                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8918         }
8919         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8920                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8921         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8922                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8923                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8924         }
8925         if (!vxlan_v)
8926                 return;
8927         if (!vxlan_m) {
8928                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8929                     (attr->group && !priv->sh->misc5_cap))
8930                         vxlan_m = &rte_flow_item_vxlan_mask;
8931                 else
8932                         vxlan_m = &nic_mask;
8933         }
8934         if ((!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8935             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8936                 void *misc_m;
8937                 void *misc_v;
8938                 char *vni_m;
8939                 char *vni_v;
8940                 int size;
8941                 int i;
8942                 misc_m = MLX5_ADDR_OF(fte_match_param,
8943                                       matcher, misc_parameters);
8944                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8945                 size = sizeof(vxlan_m->vni);
8946                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8947                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8948                 memcpy(vni_m, vxlan_m->vni, size);
8949                 for (i = 0; i < size; ++i)
8950                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8951                 return;
8952         }
8953         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8954         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8955         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8956                                                    misc5_v,
8957                                                    tunnel_header_1);
8958         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8959                                                    misc5_m,
8960                                                    tunnel_header_1);
8961         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8962                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8963                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8964         if (*tunnel_header_v)
8965                 *tunnel_header_m = vxlan_m->vni[0] |
8966                         vxlan_m->vni[1] << 8 |
8967                         vxlan_m->vni[2] << 16;
8968         else
8969                 *tunnel_header_m = 0x0;
8970         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8971         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8972                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8973 }
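
/*
 * Note on the misc5 path above: tunnel_header_1 carries the VXLAN VNI in
 * its three low bytes and the reserved byte (rsvd1) in the top byte,
 * assembled from vni[0..2] and rsvd1 with the shifts above. The legacy
 * misc path, matching the VNI only, is used when the root table lacks
 * tunnel_header support or a non-root/transfer table lacks misc5.
 */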
8974
8975 /**
8976  * Add VXLAN-GPE item to matcher and to the value.
8977  *
8978  * @param[in, out] matcher
8979  *   Flow matcher.
8980  * @param[in, out] key
8981  *   Flow matcher value.
8982  * @param[in] item
8983  *   Flow pattern to translate.
8984  * @param[in] inner
8985  *   Item is inner pattern.
8986  */
8988 static void
8989 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8990                                  const struct rte_flow_item *item, int inner)
8991 {
8992         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8993         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8994         void *headers_m;
8995         void *headers_v;
8996         void *misc_m =
8997                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8998         void *misc_v =
8999                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9000         char *vni_m;
9001         char *vni_v;
9002         uint16_t dport;
9003         int size;
9004         int i;
9005         uint8_t flags_m = 0xff;
9006         uint8_t flags_v = 0xc;
9007
9008         if (inner) {
9009                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9010                                          inner_headers);
9011                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9012         } else {
9013                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9014                                          outer_headers);
9015                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9016         }
9017         dport = MLX5_UDP_PORT_VXLAN_GPE;
9019         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9020                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9021                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9022         }
9023         if (!vxlan_v)
9024                 return;
9025         if (!vxlan_m)
9026                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9027         size = sizeof(vxlan_m->vni);
9028         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9029         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9030         memcpy(vni_m, vxlan_m->vni, size);
9031         for (i = 0; i < size; ++i)
9032                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9033         if (vxlan_m->flags) {
9034                 flags_m = vxlan_m->flags;
9035                 flags_v = vxlan_v->flags;
9036         }
9037         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9038         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9039         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
9040                  vxlan_m->protocol);
9041         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
9042                  vxlan_v->protocol);
9043 }
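
/*
 * The default flags value 0xc above corresponds to the VXLAN-GPE I (VNI
 * valid, 0x08) and P (next protocol present, 0x04) bits per the
 * VXLAN-GPE draft; an explicit flags mask in the item overrides it.
 */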
9044
9045 /**
9046  * Add Geneve item to matcher and to the value.
9047  *
9048  * @param[in, out] matcher
9049  *   Flow matcher.
9050  * @param[in, out] key
9051  *   Flow matcher value.
9052  * @param[in] item
9053  *   Flow pattern to translate.
9054  * @param[in] inner
9055  *   Item is inner pattern.
9056  */
9058 static void
9059 flow_dv_translate_item_geneve(void *matcher, void *key,
9060                               const struct rte_flow_item *item, int inner)
9061 {
9062         const struct rte_flow_item_geneve *geneve_m = item->mask;
9063         const struct rte_flow_item_geneve *geneve_v = item->spec;
9064         void *headers_m;
9065         void *headers_v;
9066         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9067         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9068         uint16_t dport;
9069         uint16_t gbhdr_m;
9070         uint16_t gbhdr_v;
9071         char *vni_m;
9072         char *vni_v;
9073         size_t size, i;
9074
9075         if (inner) {
9076                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9077                                          inner_headers);
9078                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9079         } else {
9080                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9081                                          outer_headers);
9082                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9083         }
9084         dport = MLX5_UDP_PORT_GENEVE;
9085         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9086                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9087                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9088         }
9089         if (!geneve_v)
9090                 return;
9091         if (!geneve_m)
9092                 geneve_m = &rte_flow_item_geneve_mask;
9093         size = sizeof(geneve_m->vni);
9094         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9095         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9096         memcpy(vni_m, geneve_m->vni, size);
9097         for (i = 0; i < size; ++i)
9098                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9099         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
9100                  rte_be_to_cpu_16(geneve_m->protocol));
9101         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9102                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
9103         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9104         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9105         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9106                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9107         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9108                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9109         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9110                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9111         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9112                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9113                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9114 }
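
/*
 * ver_opt_len_o_c_rsvd0 above is the first 16-bit word of the GENEVE
 * header: version (2 bits), option length (6 bits, in 4-byte units),
 * then the O (OAM) and C (critical) flags. MLX5_GENEVE_OAMF_VAL() and
 * MLX5_GENEVE_OPTLEN_VAL() extract the OAM bit and the option length
 * from that word.
 */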
9115
9116 /**
9117  * Create Geneve TLV option resource.
9118  *
9119  * @param[in, out] dev
9120  *   Pointer to rte_eth_dev structure.
9121  * @param[in] item
9122  *   Pointer to the GENEVE TLV option flow item.
9123  * @param[out] error
9124  *   Pointer to error structure.
9127  *
9128  * @return
9129  *   0 on success, a negative errno value otherwise and rte_errno is set.
9130  */
9132 int
9133 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9134                                              const struct rte_flow_item *item,
9135                                              struct rte_flow_error *error)
9136 {
9137         struct mlx5_priv *priv = dev->data->dev_private;
9138         struct mlx5_dev_ctx_shared *sh = priv->sh;
9139         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9140                         sh->geneve_tlv_option_resource;
9141         struct mlx5_devx_obj *obj;
9142         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9143         int ret = 0;
9144
9145         if (!geneve_opt_v)
9146                 return -1;
9147         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9148         if (geneve_opt_resource != NULL) {
9149                 if (geneve_opt_resource->option_class ==
9150                         geneve_opt_v->option_class &&
9151                         geneve_opt_resource->option_type ==
9152                         geneve_opt_v->option_type &&
9153                         geneve_opt_resource->length ==
9154                         geneve_opt_v->option_len) {
9155                         /* We already have a GENEVE TLV option obj allocated. */
9156                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9157                                            __ATOMIC_RELAXED);
9158                 } else {
9159                         ret = rte_flow_error_set(error, ENOMEM,
9160                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9161                                 "Only one GENEVE TLV option supported");
9162                         goto exit;
9163                 }
9164         } else {
9165                 /* Create a GENEVE TLV object and resource. */
9166                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
9167                                 geneve_opt_v->option_class,
9168                                 geneve_opt_v->option_type,
9169                                 geneve_opt_v->option_len);
9170                 if (!obj) {
9171                         ret = rte_flow_error_set(error, ENODATA,
9172                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9173                                 "Failed to create GENEVE TLV Devx object");
9174                         goto exit;
9175                 }
9176                 sh->geneve_tlv_option_resource =
9177                                 mlx5_malloc(MLX5_MEM_ZERO,
9178                                                 sizeof(*geneve_opt_resource),
9179                                                 0, SOCKET_ID_ANY);
9180                 if (!sh->geneve_tlv_option_resource) {
9181                         claim_zero(mlx5_devx_cmd_destroy(obj));
9182                         ret = rte_flow_error_set(error, ENOMEM,
9183                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9184                                 "GENEVE TLV object memory allocation failed");
9185                         goto exit;
9186                 }
9187                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9188                 geneve_opt_resource->obj = obj;
9189                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9190                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9191                 geneve_opt_resource->length = geneve_opt_v->option_len;
9192                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9193                                 __ATOMIC_RELAXED);
9194         }
9195 exit:
9196         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9197         return ret;
9198 }
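
/*
 * Usage note: a device keeps at most one GENEVE TLV option DevX object,
 * shared by all flows and reference-counted under geneve_tlv_opt_sl.
 * Registering an option with a different class/type/length than the
 * existing one therefore fails until the last user releases the resource.
 */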
9199
9200 /**
9201  * Add Geneve TLV option item to matcher.
9202  *
9203  * @param[in, out] dev
9204  *   Pointer to rte_eth_dev structure.
9205  * @param[in, out] matcher
9206  *   Flow matcher.
9207  * @param[in, out] key
9208  *   Flow matcher value.
9209  * @param[in] item
9210  *   Flow pattern to translate.
9211  * @param[out] error
9212  *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
9213  */
9214 static int
9215 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9216                                   void *key, const struct rte_flow_item *item,
9217                                   struct rte_flow_error *error)
9218 {
9219         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9220         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9221         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9222         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9223         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9224                         misc_parameters_3);
9225         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9226         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9227         int ret = 0;
9228
9229         if (!geneve_opt_v)
9230                 return -1;
9231         if (!geneve_opt_m)
9232                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9233         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9234                                                            error);
9235         if (ret) {
9236                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9237                 return ret;
9238         }
9239         /*
9240          * The GENEVE TLV option length is expressed by the option length
9241          * field in the GENEVE header.
9242          * If matching on the option length was not explicitly requested
9243          * but a GENEVE TLV option item is present, set the option length
9244          * field implicitly.
9245          */
9246         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9247                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9248                          MLX5_GENEVE_OPTLEN_MASK);
9249                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9250                          geneve_opt_v->option_len + 1);
9251         }
9252         /* Set the data. */
9253         if (geneve_opt_v->data) {
9254                 memcpy(&opt_data_key, geneve_opt_v->data,
9255                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9256                                 sizeof(opt_data_key)));
9257                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9258                                 sizeof(opt_data_key));
9259                 memcpy(&opt_data_mask, geneve_opt_m->data,
9260                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9261                                 sizeof(opt_data_mask)));
9262                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9263                                 sizeof(opt_data_mask));
9264                 MLX5_SET(fte_match_set_misc3, misc3_m,
9265                                 geneve_tlv_option_0_data,
9266                                 rte_be_to_cpu_32(opt_data_mask));
9267                 MLX5_SET(fte_match_set_misc3, misc3_v,
9268                                 geneve_tlv_option_0_data,
9269                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9270         }
9271         return ret;
9272 }
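
/*
 * Only the first 32 bits of the option payload can be matched above via
 * the single geneve_tlv_option_0_data field; the RTE_MIN() copies
 * truncate longer option data to sizeof(opt_data_key).
 */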
9273
9274 /**
9275  * Add MPLS item to matcher and to the value.
9276  *
9277  * @param[in, out] matcher
9278  *   Flow matcher.
9279  * @param[in, out] key
9280  *   Flow matcher value.
9281  * @param[in] item
9282  *   Flow pattern to translate.
9283  * @param[in] prev_layer
9284  *   The protocol layer indicated in previous item.
9285  * @param[in] inner
9286  *   Item is inner pattern.
9287  */
9288 static void
9289 flow_dv_translate_item_mpls(void *matcher, void *key,
9290                             const struct rte_flow_item *item,
9291                             uint64_t prev_layer,
9292                             int inner)
9293 {
9294         const uint32_t *in_mpls_m = item->mask;
9295         const uint32_t *in_mpls_v = item->spec;
9296         uint32_t *out_mpls_m = NULL;
9297         uint32_t *out_mpls_v = NULL;
9298         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9299         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9300         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9301                                      misc_parameters_2);
9302         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9303         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9304         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9305
9306         switch (prev_layer) {
9307         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9308                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9309                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9310                          MLX5_UDP_PORT_MPLS);
9311                 break;
9312         case MLX5_FLOW_LAYER_GRE:
9313                 /* Fall-through. */
9314         case MLX5_FLOW_LAYER_GRE_KEY:
9315                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9316                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9317                          RTE_ETHER_TYPE_MPLS);
9318                 break;
9319         default:
9320                 break;
9321         }
9322         if (!in_mpls_v)
9323                 return;
9324         if (!in_mpls_m)
9325                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9326         switch (prev_layer) {
9327         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9328                 out_mpls_m =
9329                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9330                                                  outer_first_mpls_over_udp);
9331                 out_mpls_v =
9332                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9333                                                  outer_first_mpls_over_udp);
9334                 break;
9335         case MLX5_FLOW_LAYER_GRE:
9336                 out_mpls_m =
9337                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9338                                                  outer_first_mpls_over_gre);
9339                 out_mpls_v =
9340                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9341                                                  outer_first_mpls_over_gre);
9342                 break;
9343         default:
9344                 /* Inner MPLS not over GRE is not supported. */
9345                 if (!inner) {
9346                         out_mpls_m =
9347                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9348                                                          misc2_m,
9349                                                          outer_first_mpls);
9350                         out_mpls_v =
9351                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9352                                                          misc2_v,
9353                                                          outer_first_mpls);
9354                 }
9355                 break;
9356         }
9357         if (out_mpls_m && out_mpls_v) {
9358                 *out_mpls_m = *in_mpls_m;
9359                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9360         }
9361 }
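
/*
 * The MPLS label stack entry copied above packs label (20 bits), TC
 * (3 bits), bottom-of-stack (1 bit) and TTL (8 bits) into one 32-bit
 * word; which misc2 field receives it depends on whether the MPLS header
 * follows UDP, GRE, or is the outermost label.
 */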
9362
9363 /**
9364  * Add metadata register item to matcher
9365  *
9366  * @param[in, out] matcher
9367  *   Flow matcher.
9368  * @param[in, out] key
9369  *   Flow matcher value.
9370  * @param[in] reg_type
9371  *   Type of device metadata register.
9372  * @param[in] data
9373  *   Register value to match.
9374  * @param[in] mask
9375  *   Register mask.
9376  */
9377 static void
9378 flow_dv_match_meta_reg(void *matcher, void *key,
9379                        enum modify_reg reg_type,
9380                        uint32_t data, uint32_t mask)
9381 {
9382         void *misc2_m =
9383                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9384         void *misc2_v =
9385                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9386         uint32_t temp;
9387
9388         data &= mask;
9389         switch (reg_type) {
9390         case REG_A:
9391                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9392                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9393                 break;
9394         case REG_B:
9395                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9396                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9397                 break;
9398         case REG_C_0:
9399                 /*
9400                  * The metadata register C0 field might be divided into
9401                  * source vport index and META item value; set this field
9402                  * according to the specified mask, not as a whole.
9403                  */
9404                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9405                 temp |= mask;
9406                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9407                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9408                 temp &= ~mask;
9409                 temp |= data;
9410                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9411                 break;
9412         case REG_C_1:
9413                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9414                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9415                 break;
9416         case REG_C_2:
9417                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9418                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9419                 break;
9420         case REG_C_3:
9421                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9422                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9423                 break;
9424         case REG_C_4:
9425                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9426                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9427                 break;
9428         case REG_C_5:
9429                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9430                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9431                 break;
9432         case REG_C_6:
9433                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9434                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9435                 break;
9436         case REG_C_7:
9437                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9438                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9439                 break;
9440         default:
9441                 MLX5_ASSERT(false);
9442                 break;
9443         }
9444 }
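
/*
 * Usage sketch (illustrative, using a caller-provided matcher/key
 * pair): matching value 0x1234 in register C1 with a full mask
 * reduces to one MLX5_SET() pair on misc_parameters_2:
 *
 *     flow_dv_match_meta_reg(matcher, key, REG_C_1,
 *                            0x1234, UINT32_MAX);
 *
 * Only REG_C_0 takes the read-modify-write path above, because its
 * bits may be shared between the source vport metadata and META/MARK
 * values.
 */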
9445
9446 /**
9447  * Add MARK item to matcher
9448  *
9449  * @param[in] dev
9450  *   The device to configure through.
9451  * @param[in, out] matcher
9452  *   Flow matcher.
9453  * @param[in, out] key
9454  *   Flow matcher value.
9455  * @param[in] item
9456  *   Flow pattern to translate.
9457  */
9458 static void
9459 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9460                             void *matcher, void *key,
9461                             const struct rte_flow_item *item)
9462 {
9463         struct mlx5_priv *priv = dev->data->dev_private;
9464         const struct rte_flow_item_mark *mark;
9465         uint32_t value;
9466         uint32_t mask;
9467
9468         mark = item->mask ? (const void *)item->mask :
9469                             &rte_flow_item_mark_mask;
9470         mask = mark->id & priv->sh->dv_mark_mask;
9471         mark = (const void *)item->spec;
9472         MLX5_ASSERT(mark);
9473         value = mark->id & priv->sh->dv_mark_mask & mask;
9474         if (mask) {
9475                 enum modify_reg reg;
9476
9477                 /* Get the metadata register index for the mark. */
9478                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9479                 MLX5_ASSERT(reg > 0);
9480                 if (reg == REG_C_0) {
9481                         struct mlx5_priv *priv = dev->data->dev_private;
9482                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9483                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9484
9485                         mask &= msk_c0;
9486                         mask <<= shl_c0;
9487                         value <<= shl_c0;
9488                 }
9489                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9490         }
9491 }
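
/*
 * Usage sketch (illustrative, hypothetical mark id): a MARK item with
 * id 0xcafe and the default full mask is clipped by dv_mark_mask and
 * matched in whichever REG_C_x register mlx5_flow_get_reg_id()
 * assigned to MLX5_FLOW_MARK:
 *
 *     struct rte_flow_item_mark spec = { .id = 0xcafe };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_MARK,
 *             .spec = &spec,
 *     };
 *
 *     flow_dv_translate_item_mark(dev, matcher, key, &item);
 */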
9492
9493 /**
9494  * Add META item to matcher
9495  *
9496  * @param[in] dev
9497  *   The device to configure through.
9498  * @param[in, out] matcher
9499  *   Flow matcher.
9500  * @param[in, out] key
9501  *   Flow matcher value.
9502  * @param[in] attr
9503  *   Attributes of flow that includes this item.
9504  * @param[in] item
9505  *   Flow pattern to translate.
9506  */
9507 static void
9508 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9509                             void *matcher, void *key,
9510                             const struct rte_flow_attr *attr,
9511                             const struct rte_flow_item *item)
9512 {
9513         const struct rte_flow_item_meta *meta_m;
9514         const struct rte_flow_item_meta *meta_v;
9515
9516         meta_m = (const void *)item->mask;
9517         if (!meta_m)
9518                 meta_m = &rte_flow_item_meta_mask;
9519         meta_v = (const void *)item->spec;
9520         if (meta_v) {
9521                 int reg;
9522                 uint32_t value = meta_v->data;
9523                 uint32_t mask = meta_m->data;
9524
9525                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9526                 if (reg < 0)
9527                         return;
9528                 MLX5_ASSERT(reg != REG_NON);
9529                 if (reg == REG_C_0) {
9530                         struct mlx5_priv *priv = dev->data->dev_private;
9531                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9532                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9533
9534                         mask &= msk_c0;
9535                         mask <<= shl_c0;
9536                         value <<= shl_c0;
9537                 }
9538                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9539         }
9540 }
9541
9542 /**
9543  * Add vport metadata Reg C0 item to matcher
9544  *
9545  * @param[in, out] matcher
9546  *   Flow matcher.
9547  * @param[in, out] key
9548  *   Flow matcher value.
9549  * @param[in] value
9550  *   Register value to match.
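 * @param[in] mask
 *   Register mask.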
9551  */
9552 static void
9553 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9554                                   uint32_t value, uint32_t mask)
9555 {
9556         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9557 }
9558
9559 /**
9560  * Add tag item to matcher
9561  *
9562  * @param[in] dev
9563  *   The device to configure through.
9564  * @param[in, out] matcher
9565  *   Flow matcher.
9566  * @param[in, out] key
9567  *   Flow matcher value.
9568  * @param[in] item
9569  *   Flow pattern to translate.
9570  */
9571 static void
9572 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9573                                 void *matcher, void *key,
9574                                 const struct rte_flow_item *item)
9575 {
9576         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9577         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9578         uint32_t mask, value;
9579
9580         MLX5_ASSERT(tag_v);
9581         value = tag_v->data;
9582         mask = tag_m ? tag_m->data : UINT32_MAX;
9583         if (tag_v->id == REG_C_0) {
9584                 struct mlx5_priv *priv = dev->data->dev_private;
9585                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9586                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9587
9588                 mask &= msk_c0;
9589                 mask <<= shl_c0;
9590                 value <<= shl_c0;
9591         }
9592         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9593 }
9594
9595 /**
9596  * Add TAG item to matcher
9597  *
9598  * @param[in] dev
9599  *   The device to configure through.
9600  * @param[in, out] matcher
9601  *   Flow matcher.
9602  * @param[in, out] key
9603  *   Flow matcher value.
9604  * @param[in] item
9605  *   Flow pattern to translate.
9606  */
9607 static void
9608 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9609                            void *matcher, void *key,
9610                            const struct rte_flow_item *item)
9611 {
9612         const struct rte_flow_item_tag *tag_v = item->spec;
9613         const struct rte_flow_item_tag *tag_m = item->mask;
9614         enum modify_reg reg;
9615
9616         MLX5_ASSERT(tag_v);
9617         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9618         /* Get the metadata register index for the tag. */
9619         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9620         MLX5_ASSERT(reg > 0);
9621         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9622 }
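
/*
 * Usage sketch (illustrative, hypothetical index/data): a TAG item
 * with index 3 and data 0x5 resolves through mlx5_flow_get_reg_id()
 * to the REG_C_x register backing application tag 3 and is then
 * matched like any other metadata register:
 *
 *     struct rte_flow_item_tag spec = { .data = 0x5, .index = 3 };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_TAG,
 *             .spec = &spec,
 *     };
 *
 *     flow_dv_translate_item_tag(dev, matcher, key, &item);
 */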
9623
9624 /**
9625  * Add source vport match to the specified matcher.
9626  *
9627  * @param[in, out] matcher
9628  *   Flow matcher.
9629  * @param[in, out] key
9630  *   Flow matcher value.
9631  * @param[in] port
9632  *   Source vport value to match.
9633  * @param[in] mask
9634  *   Mask.
9635  */
9636 static void
9637 flow_dv_translate_item_source_vport(void *matcher, void *key,
9638                                     int16_t port, uint16_t mask)
9639 {
9640         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9641         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9642
9643         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9644         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9645 }
9646
9647 /**
9648  * Translate port-id item to E-Switch match on port-id.
9649  *
9650  * @param[in] dev
9651  *   The device to configure through.
9652  * @param[in, out] matcher
9653  *   Flow matcher.
9654  * @param[in, out] key
9655  *   Flow matcher value.
9656  * @param[in] item
9657  *   Flow pattern to translate.
9658  * @param[in] attr
9659  *   Flow attributes.
9660  *
9661  * @return
9662  *   0 on success, a negative errno value otherwise.
9663  */
9664 static int
9665 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9666                                void *key, const struct rte_flow_item *item,
9667                                const struct rte_flow_attr *attr)
9668 {
9669         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9670         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9671         struct mlx5_priv *priv;
9672         uint16_t mask, id;
9673
9674         mask = pid_m ? pid_m->id : 0xffff;
9675         id = pid_v ? pid_v->id : dev->data->port_id;
9676         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9677         if (!priv)
9678                 return -rte_errno;
9679         /*
9680          * Translate to vport field or to metadata, depending on mode.
9681          * Kernel can use either misc.source_port or half of C0 metadata
9682          * register.
9683          */
9684         if (priv->vport_meta_mask) {
9685                 /*
9686                  * Provide the hint for SW steering library
9687                  * to insert the flow into ingress domain and
9688                  * save the extra vport match.
9689                  */
9690                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9691                     priv->pf_bond < 0 && attr->transfer)
9692                         flow_dv_translate_item_source_vport
9693                                 (matcher, key, priv->vport_id, mask);
9694                 /*
9695                  * We should always set the vport metadata register,
9696                  * otherwise the SW steering library can drop
9697                  * the rule if wire vport metadata value is not zero,
9698                  * it depends on kernel configuration.
9699                  */
9700                 flow_dv_translate_item_meta_vport(matcher, key,
9701                                                   priv->vport_meta_tag,
9702                                                   priv->vport_meta_mask);
9703         } else {
9704                 flow_dv_translate_item_source_vport(matcher, key,
9705                                                     priv->vport_id, mask);
9706         }
9707         return 0;
9708 }
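
/*
 * Usage sketch (illustrative): for a transfer rule with a PORT_ID
 * item selecting DPDK port 1, the translation resolves the E-Switch
 * info of that port and emits either a source vport match or a
 * REG_C_0 vport metadata match, depending on the steering mode:
 *
 *     struct rte_flow_item_port_id spec = { .id = 1 };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *             .spec = &spec,
 *     };
 *
 *     if (flow_dv_translate_item_port_id(dev, matcher, key,
 *                                        &item, attr))
 *             return -rte_errno;
 */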
9709
9710 /**
9711  * Add ICMP6 item to matcher and to the value.
9712  *
9713  * @param[in, out] matcher
9714  *   Flow matcher.
9715  * @param[in, out] key
9716  *   Flow matcher value.
9717  * @param[in] item
9718  *   Flow pattern to translate.
9719  * @param[in] inner
9720  *   Item is inner pattern.
9721  */
9722 static void
9723 flow_dv_translate_item_icmp6(void *matcher, void *key,
9724                               const struct rte_flow_item *item,
9725                               int inner)
9726 {
9727         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9728         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9729         void *headers_m;
9730         void *headers_v;
9731         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9732                                      misc_parameters_3);
9733         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9734         if (inner) {
9735                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9736                                          inner_headers);
9737                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9738         } else {
9739                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9740                                          outer_headers);
9741                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9742         }
9743         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9744         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9745         if (!icmp6_v)
9746                 return;
9747         if (!icmp6_m)
9748                 icmp6_m = &rte_flow_item_icmp6_mask;
9749         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9750         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9751                  icmp6_v->type & icmp6_m->type);
9752         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9753         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9754                  icmp6_v->code & icmp6_m->code);
9755 }
9756
9757 /**
9758  * Add ICMP item to matcher and to the value.
9759  *
9760  * @param[in, out] matcher
9761  *   Flow matcher.
9762  * @param[in, out] key
9763  *   Flow matcher value.
9764  * @param[in] item
9765  *   Flow pattern to translate.
9766  * @param[in] inner
9767  *   Item is inner pattern.
9768  */
9769 static void
9770 flow_dv_translate_item_icmp(void *matcher, void *key,
9771                             const struct rte_flow_item *item,
9772                             int inner)
9773 {
9774         const struct rte_flow_item_icmp *icmp_m = item->mask;
9775         const struct rte_flow_item_icmp *icmp_v = item->spec;
9776         uint32_t icmp_header_data_m = 0;
9777         uint32_t icmp_header_data_v = 0;
9778         void *headers_m;
9779         void *headers_v;
9780         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9781                                      misc_parameters_3);
9782         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9783         if (inner) {
9784                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9785                                          inner_headers);
9786                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9787         } else {
9788                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9789                                          outer_headers);
9790                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9791         }
9792         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9793         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9794         if (!icmp_v)
9795                 return;
9796         if (!icmp_m)
9797                 icmp_m = &rte_flow_item_icmp_mask;
9798         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9799                  icmp_m->hdr.icmp_type);
9800         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9801                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9802         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9803                  icmp_m->hdr.icmp_code);
9804         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9805                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9806         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9807         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9808         if (icmp_header_data_m) {
9809                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9810                 icmp_header_data_v |=
9811                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9812                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9813                          icmp_header_data_m);
9814                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9815                          icmp_header_data_v & icmp_header_data_m);
9816         }
9817 }
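
/*
 * Note on the icmp_header_data dword above: identifier and sequence
 * number are packed as (ident << 16) | seq_nb in CPU order. E.g. an
 * item with hdr.icmp_ident = RTE_BE16(0x1234) and hdr.icmp_seq_nb =
 * RTE_BE16(0x0001), both fully masked, produces the match value
 * 0x12340001.
 */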
9818
9819 /**
9820  * Add GTP item to matcher and to the value.
9821  *
9822  * @param[in, out] matcher
9823  *   Flow matcher.
9824  * @param[in, out] key
9825  *   Flow matcher value.
9826  * @param[in] item
9827  *   Flow pattern to translate.
9828  * @param[in] inner
9829  *   Item is inner pattern.
9830  */
9831 static void
9832 flow_dv_translate_item_gtp(void *matcher, void *key,
9833                            const struct rte_flow_item *item, int inner)
9834 {
9835         const struct rte_flow_item_gtp *gtp_m = item->mask;
9836         const struct rte_flow_item_gtp *gtp_v = item->spec;
9837         void *headers_m;
9838         void *headers_v;
9839         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9840                                      misc_parameters_3);
9841         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9842         uint16_t dport = RTE_GTPU_UDP_PORT;
9843
9844         if (inner) {
9845                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9846                                          inner_headers);
9847                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9848         } else {
9849                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9850                                          outer_headers);
9851                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9852         }
9853         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9854                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9855                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9856         }
9857         if (!gtp_v)
9858                 return;
9859         if (!gtp_m)
9860                 gtp_m = &rte_flow_item_gtp_mask;
9861         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9862                  gtp_m->v_pt_rsv_flags);
9863         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9864                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9865         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9866         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9867                  gtp_v->msg_type & gtp_m->msg_type);
9868         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9869                  rte_be_to_cpu_32(gtp_m->teid));
9870         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9871                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9872 }
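
/*
 * Usage sketch (illustrative, hypothetical TEID): when the preceding
 * UDP item leaves the destination port unspecified, the default
 * RTE_GTPU_UDP_PORT (2152) match is added automatically:
 *
 *     struct rte_flow_item_gtp spec = { .teid = RTE_BE32(0x1f) };
 *     struct rte_flow_item_gtp mask = { .teid = RTE_BE32(UINT32_MAX) };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_GTP,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 *
 *     flow_dv_translate_item_gtp(matcher, key, &item, 0);
 */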
9873
9874 /**
9875  * Add GTP PSC item to matcher.
9876  *
9877  * @param[in, out] matcher
9878  *   Flow matcher.
9879  * @param[in, out] key
9880  *   Flow matcher value.
9881  * @param[in] item
9882  *   Flow pattern to translate.
9883  */
9884 static int
9885 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9886                                const struct rte_flow_item *item)
9887 {
9888         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9889         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9890         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9891                         misc_parameters_3);
9892         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9893         union {
9894                 uint32_t w32;
9895                 struct {
9896                         uint16_t seq_num;
9897                         uint8_t npdu_num;
9898                         uint8_t next_ext_header_type;
9899                 };
9900         } dw_2;
9901         uint8_t gtp_flags;
9902
9903         /* Always set the E-flag match to one, regardless of GTP item settings. */
9904         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9905         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9906         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9907         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9908         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9909         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9910         /* Set the next extension header type. */
9911         dw_2.seq_num = 0;
9912         dw_2.npdu_num = 0;
9913         dw_2.next_ext_header_type = 0xff;
9914         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9915                  rte_cpu_to_be_32(dw_2.w32));
9916         dw_2.seq_num = 0;
9917         dw_2.npdu_num = 0;
9918         dw_2.next_ext_header_type = 0x85;
9919         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9920                  rte_cpu_to_be_32(dw_2.w32));
9921         if (gtp_psc_v) {
9922                 union {
9923                         uint32_t w32;
9924                         struct {
9925                                 uint8_t len;
9926                                 uint8_t type_flags;
9927                                 uint8_t qfi;
9928                                 uint8_t reserved;
9929                         };
9930                 } dw_0;
9931
9932                 /* Set the extension header PDU type and QoS. */
9933                 if (!gtp_psc_m)
9934                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9935                 dw_0.w32 = 0;
9936                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9937                 dw_0.qfi = gtp_psc_m->qfi;
9938                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9939                          rte_cpu_to_be_32(dw_0.w32));
9940                 dw_0.w32 = 0;
9941                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9942                                                         gtp_psc_m->pdu_type);
9943                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9944                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9945                          rte_cpu_to_be_32(dw_0.w32));
9946         }
9947         return 0;
9948 }
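
/*
 * Note on the dw_2 encoding above: 0x85 is the GTP-U next extension
 * header type assigned to the PDU session container, so the value
 * dword requires "next extension header == PDU session container"
 * while the 0xff byte in the mask dword restricts the match to that
 * single field.
 */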
9949
9950 /**
9951  * Add eCPRI item to matcher and to the value.
9952  *
9953  * @param[in] dev
9954  *   The device to configure through.
9955  * @param[in, out] matcher
9956  *   Flow matcher.
9957  * @param[in, out] key
9958  *   Flow matcher value.
9959  * @param[in] item
9960  *   Flow pattern to translate.
9963  */
9964 static void
9965 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9966                              void *key, const struct rte_flow_item *item)
9967 {
9968         struct mlx5_priv *priv = dev->data->dev_private;
9969         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9970         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9971         struct rte_ecpri_common_hdr common;
9972         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9973                                      misc_parameters_4);
9974         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9975         uint32_t *samples;
9976         void *dw_m;
9977         void *dw_v;
9978
9979         if (!ecpri_v)
9980                 return;
9981         if (!ecpri_m)
9982                 ecpri_m = &rte_flow_item_ecpri_mask;
9983         /*
9984          * At most four DW samples are supported in a single matching now.
9985          * Two are currently used for eCPRI matching:
9986          * 1. Type: one byte, mask should be 0x00ff0000 in network order
9987          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
9988          *    if any.
9989          */
9990         if (!ecpri_m->hdr.common.u32)
9991                 return;
9992         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9993         /* Need to take the whole DW as the mask to fill the entry. */
9994         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9995                             prog_sample_field_value_0);
9996         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9997                             prog_sample_field_value_0);
9998         /* Already big endian (network order) in the header. */
9999         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
10000         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
10001         /* Sample#0, used for matching type, offset 0. */
10002         MLX5_SET(fte_match_set_misc4, misc4_m,
10003                  prog_sample_field_id_0, samples[0]);
10004         /* It makes no sense to set the sample ID in the mask field. */
10005         MLX5_SET(fte_match_set_misc4, misc4_v,
10006                  prog_sample_field_id_0, samples[0]);
10007         /*
10008          * Check whether the message body part needs to be matched.
10009          * Wildcard rules matching only the type field should be supported.
10010          */
10011         if (ecpri_m->hdr.dummy[0]) {
10012                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
10013                 switch (common.type) {
10014                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10015                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10016                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10017                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10018                                             prog_sample_field_value_1);
10019                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10020                                             prog_sample_field_value_1);
10021                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10022                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10023                                             ecpri_m->hdr.dummy[0];
10024                         /* Sample#1, to match message body, offset 4. */
10025                         MLX5_SET(fte_match_set_misc4, misc4_m,
10026                                  prog_sample_field_id_1, samples[1]);
10027                         MLX5_SET(fte_match_set_misc4, misc4_v,
10028                                  prog_sample_field_id_1, samples[1]);
10029                         break;
10030                 default:
10031                         /* Others, do not match any sample ID. */
10032                         break;
10033                 }
10034         }
10035 }
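
/*
 * Usage sketch (illustrative): matching only the eCPRI message type
 * byte, e.g. IQ data, exercises flex parser sample #0 alone; the body
 * dword and sample #1 stay untouched:
 *
 *     struct rte_flow_item_ecpri spec = {
 *             .hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
 *     };
 *     struct rte_flow_item_ecpri mask = {
 *             .hdr.common.type = 0xff,
 *     };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_ECPRI,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 *
 *     flow_dv_translate_item_ecpri(dev, matcher, key, &item);
 */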
10036
10037 /**
10038  * Add connection tracking status item to matcher.
10039  *
10040  * @param[in] dev
10041  *   The device to configure through.
10042  * @param[in, out] matcher
10043  *   Flow matcher.
10044  * @param[in, out] key
10045  *   Flow matcher value.
10046  * @param[in] item
10047  *   Flow pattern to translate.
10048  */
10049 static void
10050 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10051                               void *matcher, void *key,
10052                               const struct rte_flow_item *item)
10053 {
10054         uint32_t reg_value = 0;
10055         int reg_id;
10056         /* The 8 LSBs are 0b11000011; the middle 4 bits are reserved. */
10057         uint32_t reg_mask = 0;
10058         const struct rte_flow_item_conntrack *spec = item->spec;
10059         const struct rte_flow_item_conntrack *mask = item->mask;
10060         uint32_t flags;
10061         struct rte_flow_error error;
10062
10063         if (!mask)
10064                 mask = &rte_flow_item_conntrack_mask;
10065         if (!spec || !mask->flags)
10066                 return;
10067         flags = spec->flags & mask->flags;
10068         /* The conflict should be checked in the validation. */
10069         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10070                 reg_value |= MLX5_CT_SYNDROME_VALID;
10071         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10072                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10073         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10074                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10075         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10076                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10077         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10078                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10079         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10080                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10081                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10082                 reg_mask |= 0xc0;
10083         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10084                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10085         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10086                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10087         /* The REG_C_x value could be saved during startup. */
10088         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10089         if (reg_id == REG_NON)
10090                 return;
10091         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10092                                reg_value, reg_mask);
10093 }
10094
10095 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10096
10097 #define HEADER_IS_ZERO(match_criteria, headers)                              \
10098         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
10099                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
10100
10101 /**
10102  * Calculate flow matcher enable bitmap.
10103  *
10104  * @param match_criteria
10105  *   Pointer to flow matcher criteria.
10106  *
10107  * @return
10108  *   Bitmap of enabled fields.
10109  */
10110 static uint8_t
10111 flow_dv_matcher_enable(uint32_t *match_criteria)
10112 {
10113         uint8_t match_criteria_enable;
10114
10115         match_criteria_enable =
10116                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10117                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10118         match_criteria_enable |=
10119                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10120                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10121         match_criteria_enable |=
10122                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10123                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10124         match_criteria_enable |=
10125                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10126                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10127         match_criteria_enable |=
10128                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10129                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10130         match_criteria_enable |=
10131                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10132                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10133         match_criteria_enable |=
10134                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10135                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10136         return match_criteria_enable;
10137 }
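
/*
 * For instance, a matcher whose mask touches only the outer headers
 * and the metadata registers (misc_parameters_2) yields:
 *
 *     (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *     (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT)
 */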
10138
10139 static void
10140 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10141 {
10142         /*
10143          * Check the flow matching criteria first and subtract the misc5/4
10144          * length if the flow doesn't own misc5/4 parameters. Some old
10145          * rdma-core releases do not support misc5/4, so matcher creation
10146          * is expected to fail there without this subtraction. If misc5 is
10147          * provided, misc4 must be counted in since misc5 is right after misc4.
10148          */
10149         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10150                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10151                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10152                 if (!(match_criteria & (1 <<
10153                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10154                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10155                 }
10156         }
10157 }
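
/*
 * E.g. a matcher enabling neither misc4 nor misc5 gets both trailing
 * structures trimmed:
 *
 *     *size = MLX5_ST_SZ_BYTES(fte_match_param) -
 *             MLX5_ST_SZ_BYTES(fte_match_set_misc5) -
 *             MLX5_ST_SZ_BYTES(fte_match_set_misc4);
 *
 * which keeps matcher creation working on old rdma-core releases
 * without misc4/misc5 support.
 */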
10158
10159 static struct mlx5_list_entry *
10160 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10161                          struct mlx5_list_entry *entry, void *cb_ctx)
10162 {
10163         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10164         struct mlx5_flow_dv_matcher *ref = ctx->data;
10165         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10166                                                             typeof(*tbl), tbl);
10167         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10168                                                             sizeof(*resource),
10169                                                             0, SOCKET_ID_ANY);
10170
10171         if (!resource) {
10172                 rte_flow_error_set(ctx->error, ENOMEM,
10173                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10174                                    "cannot create matcher");
10175                 return NULL;
10176         }
10177         memcpy(resource, entry, sizeof(*resource));
10178         resource->tbl = &tbl->tbl;
10179         return &resource->entry;
10180 }
10181
10182 static void
10183 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10184                              struct mlx5_list_entry *entry)
10185 {
10186         mlx5_free(entry);
10187 }
10188
10189 struct mlx5_list_entry *
10190 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10191 {
10192         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10193         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10194         struct rte_eth_dev *dev = ctx->dev;
10195         struct mlx5_flow_tbl_data_entry *tbl_data;
10196         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10197         struct rte_flow_error *error = ctx->error;
10198         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10199         struct mlx5_flow_tbl_resource *tbl;
10200         void *domain;
10201         uint32_t idx = 0;
10202         int ret;
10203
10204         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10205         if (!tbl_data) {
10206                 rte_flow_error_set(error, ENOMEM,
10207                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10208                                    NULL,
10209                                    "cannot allocate flow table data entry");
10210                 return NULL;
10211         }
10212         tbl_data->idx = idx;
10213         tbl_data->tunnel = tt_prm->tunnel;
10214         tbl_data->group_id = tt_prm->group_id;
10215         tbl_data->external = !!tt_prm->external;
10216         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10217         tbl_data->is_egress = !!key.is_egress;
10218         tbl_data->is_transfer = !!key.is_fdb;
10219         tbl_data->dummy = !!key.dummy;
10220         tbl_data->level = key.level;
10221         tbl_data->id = key.id;
10222         tbl = &tbl_data->tbl;
10223         if (key.dummy)
10224                 return &tbl_data->entry;
10225         if (key.is_fdb)
10226                 domain = sh->fdb_domain;
10227         else if (key.is_egress)
10228                 domain = sh->tx_domain;
10229         else
10230                 domain = sh->rx_domain;
10231         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10232         if (ret) {
10233                 rte_flow_error_set(error, ENOMEM,
10234                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10235                                    NULL, "cannot create flow table object");
10236                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10237                 return NULL;
10238         }
10239         if (key.level != 0) {
10240                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10241                                         (tbl->obj, &tbl_data->jump.action);
10242                 if (ret) {
10243                         rte_flow_error_set(error, ENOMEM,
10244                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10245                                            NULL,
10246                                            "cannot create flow jump action");
10247                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10248                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10249                         return NULL;
10250                 }
10251         }
10252         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10253               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10254               key.level, key.id);
10255         tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10256                                               flow_dv_matcher_create_cb,
10257                                               flow_dv_matcher_match_cb,
10258                                               flow_dv_matcher_remove_cb,
10259                                               flow_dv_matcher_clone_cb,
10260                                               flow_dv_matcher_clone_free_cb);
10261         if (!tbl_data->matchers) {
10262                 rte_flow_error_set(error, ENOMEM,
10263                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10264                                    NULL,
10265                                    "cannot create tbl matcher list");
10266                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10267                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10268                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10269                 return NULL;
10270         }
10271         return &tbl_data->entry;
10272 }
10273
10274 int
10275 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10276                      void *cb_ctx)
10277 {
10278         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10279         struct mlx5_flow_tbl_data_entry *tbl_data =
10280                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10281         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10282
10283         return tbl_data->level != key.level ||
10284                tbl_data->id != key.id ||
10285                tbl_data->dummy != key.dummy ||
10286                tbl_data->is_transfer != !!key.is_fdb ||
10287                tbl_data->is_egress != !!key.is_egress;
10288 }
10289
10290 struct mlx5_list_entry *
10291 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10292                       void *cb_ctx)
10293 {
10294         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10295         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10296         struct mlx5_flow_tbl_data_entry *tbl_data;
10297         struct rte_flow_error *error = ctx->error;
10298         uint32_t idx = 0;
10299
10300         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10301         if (!tbl_data) {
10302                 rte_flow_error_set(error, ENOMEM,
10303                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10304                                    NULL,
10305                                    "cannot allocate flow table data entry");
10306                 return NULL;
10307         }
10308         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10309         tbl_data->idx = idx;
10310         return &tbl_data->entry;
10311 }
10312
10313 void
10314 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10315 {
10316         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10317         struct mlx5_flow_tbl_data_entry *tbl_data =
10318                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10319
10320         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10321 }
10322
10323 /**
10324  * Get a flow table.
10325  *
10326  * @param[in, out] dev
10327  *   Pointer to rte_eth_dev structure.
10328  * @param[in] table_level
10329  *   Table level to use.
10330  * @param[in] egress
10331  *   Direction of the table.
10332  * @param[in] transfer
10333  *   E-Switch or NIC flow.
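 * @param[in] external
 *   Whether the table is used by external (application created) flows.
 * @param[in] tunnel
 *   Tunnel offload context, or NULL if none.
 * @param[in] group_id
 *   Group ID associated with the tunnel offload context.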
10334  * @param[in] dummy
10335  *   Dummy entry for dv API.
10336  * @param[in] table_id
10337  *   Table id to use.
10338  * @param[out] error
10339  *   pointer to error structure.
10340  *
10341  * @return
10342  *   The table resource on success, NULL in case of failure.
10343  */
10344 struct mlx5_flow_tbl_resource *
10345 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10346                          uint32_t table_level, uint8_t egress,
10347                          uint8_t transfer,
10348                          bool external,
10349                          const struct mlx5_flow_tunnel *tunnel,
10350                          uint32_t group_id, uint8_t dummy,
10351                          uint32_t table_id,
10352                          struct rte_flow_error *error)
10353 {
10354         struct mlx5_priv *priv = dev->data->dev_private;
10355         union mlx5_flow_tbl_key table_key = {
10356                 {
10357                         .level = table_level,
10358                         .id = table_id,
10359                         .reserved = 0,
10360                         .dummy = !!dummy,
10361                         .is_fdb = !!transfer,
10362                         .is_egress = !!egress,
10363                 }
10364         };
10365         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10366                 .tunnel = tunnel,
10367                 .group_id = group_id,
10368                 .external = external,
10369         };
10370         struct mlx5_flow_cb_ctx ctx = {
10371                 .dev = dev,
10372                 .error = error,
10373                 .data = &table_key.v64,
10374                 .data2 = &tt_prm,
10375         };
10376         struct mlx5_list_entry *entry;
10377         struct mlx5_flow_tbl_data_entry *tbl_data;
10378
10379         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10380         if (!entry) {
10381                 rte_flow_error_set(error, ENOMEM,
10382                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10383                                    "cannot get table");
10384                 return NULL;
10385         }
10386         DRV_LOG(DEBUG, "table_level %u table_id %u "
10387                 "tunnel %u group %u registered.",
10388                 table_level, table_id,
10389                 tunnel ? tunnel->tunnel_id : 0, group_id);
10390         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10391         return &tbl_data->tbl;
10392 }
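
/*
 * Usage sketch (illustrative): taking a reference on NIC Rx table
 * level 1 with no tunnel context; the call creates the table on first
 * use and only bumps the reference count afterwards:
 *
 *     struct mlx5_flow_tbl_resource *tbl =
 *             flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
 *                                      0, 0, 0, error);
 *
 *     if (!tbl)
 *             return -rte_errno;
 *
 * The reference is dropped with flow_dv_tbl_resource_release().
 */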
10393
10394 void
10395 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10396 {
10397         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10398         struct mlx5_flow_tbl_data_entry *tbl_data =
10399                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10400
10401         MLX5_ASSERT(entry && sh);
10402         if (tbl_data->jump.action)
10403                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10404         if (tbl_data->tbl.obj)
10405                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10406         if (tbl_data->tunnel_offload && tbl_data->external) {
10407                 struct mlx5_list_entry *he;
10408                 struct mlx5_hlist *tunnel_grp_hash;
10409                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10410                 union tunnel_tbl_key tunnel_key = {
10411                         .tunnel_id = tbl_data->tunnel ?
10412                                         tbl_data->tunnel->tunnel_id : 0,
10413                         .group = tbl_data->group_id
10414                 };
10415                 uint32_t table_level = tbl_data->level;
10416                 struct mlx5_flow_cb_ctx ctx = {
10417                         .data = (void *)&tunnel_key.val,
10418                 };
10419
10420                 tunnel_grp_hash = tbl_data->tunnel ?
10421                                         tbl_data->tunnel->groups :
10422                                         thub->groups;
10423                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10424                 if (he)
10425                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10426                 DRV_LOG(DEBUG,
10427                         "table_level %u id %u tunnel %u group %u released.",
10428                         table_level,
10429                         tbl_data->id,
10430                         tbl_data->tunnel ?
10431                         tbl_data->tunnel->tunnel_id : 0,
10432                         tbl_data->group_id);
10433         }
10434         mlx5_list_destroy(tbl_data->matchers);
10435         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10436 }
10437
10438 /**
10439  * Release a flow table.
10440  *
10441  * @param[in] sh
10442  *   Pointer to device shared structure.
10443  * @param[in] tbl
10444  *   Table resource to be released.
10445  *
10446  * @return
10447  *   Returns 0 if the table was released, 1 otherwise.
10448  */
10449 static int
10450 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10451                              struct mlx5_flow_tbl_resource *tbl)
10452 {
10453         struct mlx5_flow_tbl_data_entry *tbl_data =
10454                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10455
10456         if (!tbl)
10457                 return 0;
10458         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10459 }
10460
10461 int
10462 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10463                          struct mlx5_list_entry *entry, void *cb_ctx)
10464 {
10465         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10466         struct mlx5_flow_dv_matcher *ref = ctx->data;
10467         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10468                                                         entry);
10469
10470         return cur->crc != ref->crc ||
10471                cur->priority != ref->priority ||
10472                memcmp((const void *)cur->mask.buf,
10473                       (const void *)ref->mask.buf, ref->mask.size);
10474 }
10475
10476 struct mlx5_list_entry *
10477 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10478 {
10479         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10480         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10481         struct mlx5_flow_dv_matcher *ref = ctx->data;
10482         struct mlx5_flow_dv_matcher *resource;
10483         struct mlx5dv_flow_matcher_attr dv_attr = {
10484                 .type = IBV_FLOW_ATTR_NORMAL,
10485                 .match_mask = (void *)&ref->mask,
10486         };
10487         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10488                                                             typeof(*tbl), tbl);
10489         int ret;
10490
10491         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10492                                SOCKET_ID_ANY);
10493         if (!resource) {
10494                 rte_flow_error_set(ctx->error, ENOMEM,
10495                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10496                                    "cannot create matcher");
10497                 return NULL;
10498         }
10499         *resource = *ref;
10500         dv_attr.match_criteria_enable =
10501                 flow_dv_matcher_enable(resource->mask.buf);
10502         __flow_dv_adjust_buf_size(&ref->mask.size,
10503                                   dv_attr.match_criteria_enable);
10504         dv_attr.priority = ref->priority;
10505         if (tbl->is_egress)
10506                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10507         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
10508                                                &resource->matcher_object);
10509         if (ret) {
10510                 mlx5_free(resource);
10511                 rte_flow_error_set(ctx->error, ENOMEM,
10512                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10513                                    "cannot create matcher");
10514                 return NULL;
10515         }
10516         return &resource->entry;
10517 }
10518
10519 /**
10520  * Register the flow matcher.
10521  *
10522  * @param[in, out] dev
10523  *   Pointer to rte_eth_dev structure.
10524  * @param[in, out] ref
10525  *   Pointer to the reference flow matcher.
10526  * @param[in, out] key
10527  *   Pointer to flow table key.
10528  * @param[in, out] dev_flow
10529  *   Pointer to the dev_flow.
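 * @param[in] tunnel
 *   Tunnel offload context, or NULL if none.
 * @param[in] group_id
 *   Group ID associated with the tunnel offload context.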
10530  * @param[out] error
10531  *   pointer to error structure.
10532  *
10533  * @return
10534  *   0 on success, a negative errno value otherwise and rte_errno is set.
10535  */
10536 static int
10537 flow_dv_matcher_register(struct rte_eth_dev *dev,
10538                          struct mlx5_flow_dv_matcher *ref,
10539                          union mlx5_flow_tbl_key *key,
10540                          struct mlx5_flow *dev_flow,
10541                          const struct mlx5_flow_tunnel *tunnel,
10542                          uint32_t group_id,
10543                          struct rte_flow_error *error)
10544 {
10545         struct mlx5_list_entry *entry;
10546         struct mlx5_flow_dv_matcher *resource;
10547         struct mlx5_flow_tbl_resource *tbl;
10548         struct mlx5_flow_tbl_data_entry *tbl_data;
10549         struct mlx5_flow_cb_ctx ctx = {
10550                 .error = error,
10551                 .data = ref,
10552         };
10553         /*
10554          * The tunnel offload API requires this registration for cases when
10555          * a tunnel match rule was inserted before the tunnel set rule.
10556          */
10557         tbl = flow_dv_tbl_resource_get(dev, key->level,
10558                                        key->is_egress, key->is_fdb,
10559                                        dev_flow->external, tunnel,
10560                                        group_id, 0, key->id, error);
10561         if (!tbl)
10562                 return -rte_errno;      /* No need to refill the error info */
10563         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10564         ref->tbl = tbl;
10565         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10566         if (!entry) {
10567                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10568                 return rte_flow_error_set(error, ENOMEM,
10569                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10570                                           "cannot allocate ref memory");
10571         }
10572         resource = container_of(entry, typeof(*resource), entry);
10573         dev_flow->handle->dvh.matcher = resource;
10574         return 0;
10575 }
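
/*
 * Usage sketch (illustrative, assuming a populated reference matcher
 * "ref" and the caller's dev_flow/error): registering on NIC Rx table
 * level 0; matchers with identical CRC, priority and mask share one
 * list entry and therefore one mlx5dv matcher object:
 *
 *     union mlx5_flow_tbl_key tbl_key = { .v64 = 0 };
 *
 *     if (flow_dv_matcher_register(dev, &ref, &tbl_key, dev_flow,
 *                                  NULL, 0, error))
 *             return -rte_errno;
 */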
10576
10577 struct mlx5_list_entry *
10578 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10579 {
10580         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10581         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10582         struct mlx5_flow_dv_tag_resource *entry;
10583         uint32_t idx = 0;
10584         int ret;
10585
10586         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10587         if (!entry) {
10588                 rte_flow_error_set(ctx->error, ENOMEM,
10589                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10590                                    "cannot allocate resource memory");
10591                 return NULL;
10592         }
10593         entry->idx = idx;
10594         entry->tag_id = *(uint32_t *)(ctx->data);
10595         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10596                                                   &entry->action);
10597         if (ret) {
10598                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10599                 rte_flow_error_set(ctx->error, ENOMEM,
10600                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10601                                    NULL, "cannot create action");
10602                 return NULL;
10603         }
10604         return &entry->entry;
10605 }
10606
10607 int
10608 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10609                      void *cb_ctx)
10610 {
10611         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10612         struct mlx5_flow_dv_tag_resource *tag =
10613                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10614
10615         return *(uint32_t *)(ctx->data) != tag->tag_id;
10616 }
10617
10618 struct mlx5_list_entry *
10619 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10620                      void *cb_ctx)
10621 {
10622         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10623         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10624         struct mlx5_flow_dv_tag_resource *entry;
10625         uint32_t idx = 0;
10626
10627         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10628         if (!entry) {
10629                 rte_flow_error_set(ctx->error, ENOMEM,
10630                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10631                                    "cannot allocate tag resource memory");
10632                 return NULL;
10633         }
10634         memcpy(entry, oentry, sizeof(*entry));
10635         entry->idx = idx;
10636         return &entry->entry;
10637 }
10638
10639 void
10640 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10641 {
10642         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10643         struct mlx5_flow_dv_tag_resource *tag =
10644                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10645
10646         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10647 }
10648
10649 /**
10650  * Find existing tag resource or create and register a new one.
10651  *
10652  * @param dev[in, out]
10653  *   Pointer to rte_eth_dev structure.
10654  * @param[in, out] tag_be24
10655  *   Tag value in big endian, right-shifted by 8 bits.
10656  * @param[in, out] dev_flow
10657  *   Pointer to the dev_flow.
10658  * @param[out] error
10659  *   pointer to error structure.
10660  *
10661  * @return
10662  *   0 on success otherwise -errno and errno is set.
10663  */
10664 static int
10665 flow_dv_tag_resource_register
10666                         (struct rte_eth_dev *dev,
10667                          uint32_t tag_be24,
10668                          struct mlx5_flow *dev_flow,
10669                          struct rte_flow_error *error)
10670 {
10671         struct mlx5_priv *priv = dev->data->dev_private;
10672         struct mlx5_flow_dv_tag_resource *resource;
10673         struct mlx5_list_entry *entry;
10674         struct mlx5_flow_cb_ctx ctx = {
10675                                         .error = error,
10676                                         .data = &tag_be24,
10677                                         };
10678         struct mlx5_hlist *tag_table;
10679
10680         tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
10681                                       "tags",
10682                                       MLX5_TAGS_HLIST_ARRAY_SIZE,
10683                                       false, false, priv->sh,
10684                                       flow_dv_tag_create_cb,
10685                                       flow_dv_tag_match_cb,
10686                                       flow_dv_tag_remove_cb,
10687                                       flow_dv_tag_clone_cb,
10688                                       flow_dv_tag_clone_free_cb);
10689         if (unlikely(!tag_table))
10690                 return -rte_errno;
10691         entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
10692         if (entry) {
10693                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10694                                         entry);
10695                 dev_flow->handle->dvh.rix_tag = resource->idx;
10696                 dev_flow->dv.tag_resource = resource;
10697                 return 0;
10698         }
10699         return -rte_errno;
10700 }
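
/*
 * Usage sketch (illustrative only, mirroring the MARK handling in
 * flow_dv_translate_action_sample() below; mark_conf is a hypothetical
 * rte_flow_action_mark configuration):
 *
 *	uint32_t tag_be = mlx5_flow_mark_set(mark_conf->id);
 *
 *	if (flow_dv_tag_resource_register(dev, tag_be, dev_flow, error))
 *		return -rte_errno;
 *	// dev_flow->handle->dvh.rix_tag then holds the ipool index and
 *	// dev_flow->dv.tag_resource->action the DR action to use.
 */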
10701
10702 void
10703 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10704 {
10705         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10706         struct mlx5_flow_dv_tag_resource *tag =
10707                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10708
10709         MLX5_ASSERT(tag && sh && tag->action);
10710         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10711         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10712         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10713 }
10714
10715 /**
10716  * Release the tag.
10717  *
10718  * @param dev
10719  *   Pointer to Ethernet device.
10720  * @param tag_idx
10721  *   Tag index.
10722  *
10723  * @return
10724  *   1 while a reference on it exists, 0 when freed.
10725  */
10726 static int
10727 flow_dv_tag_release(struct rte_eth_dev *dev,
10728                     uint32_t tag_idx)
10729 {
10730         struct mlx5_priv *priv = dev->data->dev_private;
10731         struct mlx5_flow_dv_tag_resource *tag;
10732
10733         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10734         if (!tag)
10735                 return 0;
10736         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10737                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10738         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10739 }
10740
10741 /**
10742  * Translate port ID action to vport.
10743  *
10744  * @param[in] dev
10745  *   Pointer to rte_eth_dev structure.
10746  * @param[in] action
10747  *   Pointer to the port ID action.
10748  * @param[out] dst_port_id
10749  *   The target port ID.
10750  * @param[out] error
10751  *   Pointer to the error structure.
10752  *
10753  * @return
10754  *   0 on success, a negative errno value otherwise and rte_errno is set.
10755  */
10756 static int
10757 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10758                                  const struct rte_flow_action *action,
10759                                  uint32_t *dst_port_id,
10760                                  struct rte_flow_error *error)
10761 {
10762         uint32_t port;
10763         struct mlx5_priv *priv;
10764         const struct rte_flow_action_port_id *conf =
10765                         (const struct rte_flow_action_port_id *)action->conf;
10766
10767         port = conf->original ? dev->data->port_id : conf->id;
10768         priv = mlx5_port_to_eswitch_info(port, false);
10769         if (!priv)
10770                 return rte_flow_error_set(error, rte_errno,
10771                                           RTE_FLOW_ERROR_TYPE_ACTION,
10772                                           NULL,
10773                                           "No eswitch info was found for port");
10774 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10775         /*
10776          * This parameter is transferred to
10777          * mlx5dv_dr_action_create_dest_ib_port().
10778          */
10779         *dst_port_id = priv->dev_port;
10780 #else
10781         /*
10782          * Legacy mode, no LAG configuration is supported.
10783          * This parameter is transferred to
10784          * mlx5dv_dr_action_create_dest_vport().
10785          */
10786         *dst_port_id = priv->vport_id;
10787 #endif
10788         return 0;
10789 }
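
/*
 * Illustrative sketch (hypothetical caller): resolving a PORT_ID action
 * configuration to the destination vport/IB port.
 *
 *	const struct rte_flow_action_port_id conf = {
 *		.original = 0,
 *		.id = peer_port_id,	// hypothetical DPDK port id
 *	};
 *	const struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *		.conf = &conf,
 *	};
 *	uint32_t dst_port;
 *
 *	if (flow_dv_translate_action_port_id(dev, &act, &dst_port, error))
 *		return -rte_errno;
 */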
10790
10791 /**
10792  * Create a counter with aging configuration.
10793  *
10794  * @param[in] dev
10795  *   Pointer to rte_eth_dev structure.
10796  * @param[in] dev_flow
10797  *   Pointer to the mlx5_flow.
10798  * @param[out] count
10799  *   Pointer to the counter action configuration.
10800  * @param[in] age
10801  *   Pointer to the aging action configuration.
10802  *
10803  * @return
10804  *   Index to flow counter on success, 0 otherwise.
10805  */
10806 static uint32_t
10807 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10808                                 struct mlx5_flow *dev_flow,
10809                                 const struct rte_flow_action_count *count,
10810                                 const struct rte_flow_action_age *age)
10811 {
10812         uint32_t counter;
10813         struct mlx5_age_param *age_param;
10814
10815         if (count && count->shared)
10816                 counter = flow_dv_counter_get_shared(dev, count->id);
10817         else
10818                 counter = flow_dv_counter_alloc(dev, !!age);
10819         if (!counter || age == NULL)
10820                 return counter;
10821         age_param = flow_dv_counter_idx_get_age(dev, counter);
10822         age_param->context = age->context ? age->context :
10823                 (void *)(uintptr_t)(dev_flow->flow_idx);
10824         age_param->timeout = age->timeout;
10825         age_param->port_id = dev->data->port_id;
10826         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10827         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10828         return counter;
10829 }
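
/*
 * Illustrative sketch (values are examples): allocate a counter that also
 * participates in aging; with age == NULL a plain counter is returned.
 *
 *	const struct rte_flow_action_count count = { .shared = 0, .id = 0 };
 *	const struct rte_flow_action_age age = {
 *		.timeout = 10,		// in seconds
 *		.context = NULL,	// NULL defaults to the flow index
 *	};
 *	uint32_t cnt_idx = flow_dv_translate_create_counter(dev, dev_flow,
 *							    &count, &age);
 *	if (!cnt_idx)
 *		return -rte_errno;	// allocation failed
 */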
10830
10831 /**
10832  * Add Tx queue matcher.
10833  *
10834  * @param[in] dev
10835  *   Pointer to the dev struct.
10836  * @param[in, out] matcher
10837  *   Flow matcher.
10838  * @param[in, out] key
10839  *   Flow matcher value.
10840  * @param[in] item
10841  *   Flow pattern to translate.
10844  */
10845 static void
10846 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10847                                 void *matcher, void *key,
10848                                 const struct rte_flow_item *item)
10849 {
10850         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10851         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10852         void *misc_m =
10853                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10854         void *misc_v =
10855                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10856         struct mlx5_txq_ctrl *txq;
10857         uint32_t queue;
10858
10860         queue_m = (const void *)item->mask;
10861         if (!queue_m)
10862                 return;
10863         queue_v = (const void *)item->spec;
10864         if (!queue_v)
10865                 return;
10866         txq = mlx5_txq_get(dev, queue_v->queue);
10867         if (!txq)
10868                 return;
10869         queue = txq->obj->sq->id;
10870         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10871         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10872                  queue & queue_m->queue);
10873         mlx5_txq_release(dev, queue_v->queue);
10874 }
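
/*
 * Illustrative sketch: the internal Tx queue item matched above is built
 * by the PMD itself for its Tx rules, roughly as:
 *
 *	struct mlx5_rte_flow_item_tx_queue queue_spec = {
 *		.queue = txq_index,	// hypothetical Tx queue index
 *	};
 *	struct mlx5_rte_flow_item_tx_queue queue_mask = {
 *		.queue = UINT32_MAX,
 *	};
 *
 * The translation converts the queue index to the underlying SQ number
 * and matches it against the source_sqn field.
 */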
10875
10876 /**
10877  * Set the hash fields according to the @p flow information.
10878  *
10879  * @param[in] dev_flow
10880  *   Pointer to the mlx5_flow.
10881  * @param[in] rss_desc
10882  *   Pointer to the mlx5_flow_rss_desc.
10883  */
10884 static void
10885 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10886                        struct mlx5_flow_rss_desc *rss_desc)
10887 {
10888         uint64_t items = dev_flow->handle->layers;
10889         int rss_inner = 0;
10890         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10891
10892         dev_flow->hash_fields = 0;
10893 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10894         if (rss_desc->level >= 2) {
10895                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10896                 rss_inner = 1;
10897         }
10898 #endif
10899         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10900             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10901                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10902                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10903                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10904                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10905                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10906                         else
10907                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10908                 }
10909         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10910                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10911                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10912                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10913                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10914                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10915                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10916                         else
10917                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10918                 }
10919         }
10920         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10921             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10922                 if (rss_types & ETH_RSS_UDP) {
10923                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10924                                 dev_flow->hash_fields |=
10925                                                 IBV_RX_HASH_SRC_PORT_UDP;
10926                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10927                                 dev_flow->hash_fields |=
10928                                                 IBV_RX_HASH_DST_PORT_UDP;
10929                         else
10930                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10931                 }
10932         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10933                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10934                 if (rss_types & ETH_RSS_TCP) {
10935                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10936                                 dev_flow->hash_fields |=
10937                                                 IBV_RX_HASH_SRC_PORT_TCP;
10938                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10939                                 dev_flow->hash_fields |=
10940                                                 IBV_RX_HASH_DST_PORT_TCP;
10941                         else
10942                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10943                 }
10944         }
10945 }
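
/*
 * Worked example: for an outer IPv4/UDP flow with rss_desc->types ==
 * (ETH_RSS_UDP | ETH_RSS_L3_SRC_ONLY) and level < 2, the function above
 * yields
 *	IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_PORT_UDP |
 *	IBV_RX_HASH_DST_PORT_UDP
 * i.e. the source address only on L3, but both ports on L4 since no
 * ETH_RSS_L4_*_ONLY bit is set.
 */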
10946
10947 /**
10948  * Prepare an Rx Hash queue.
10949  *
10950  * @param dev
10951  *   Pointer to Ethernet device.
10952  * @param[in] dev_flow
10953  *   Pointer to the mlx5_flow.
10954  * @param[in] rss_desc
10955  *   Pointer to the mlx5_flow_rss_desc.
10956  * @param[out] hrxq_idx
10957  *   Hash Rx queue index.
10958  *
10959  * @return
10960  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10961  */
10962 static struct mlx5_hrxq *
10963 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10964                      struct mlx5_flow *dev_flow,
10965                      struct mlx5_flow_rss_desc *rss_desc,
10966                      uint32_t *hrxq_idx)
10967 {
10968         struct mlx5_priv *priv = dev->data->dev_private;
10969         struct mlx5_flow_handle *dh = dev_flow->handle;
10970         struct mlx5_hrxq *hrxq;
10971
10972         MLX5_ASSERT(rss_desc->queue_num);
10973         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10974         rss_desc->hash_fields = dev_flow->hash_fields;
10975         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10976         rss_desc->shared_rss = 0;
10977         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10978         if (!*hrxq_idx)
10979                 return NULL;
10980         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10981                               *hrxq_idx);
10982         return hrxq;
10983 }
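
/*
 * Illustrative sketch (mirroring the QUEUE fate handling further below):
 * prepare a single-queue Rx hash queue and keep the index for release.
 *
 *	uint32_t hrxq_idx;
 *	struct mlx5_hrxq *hrxq;
 *
 *	rss_desc->queue_num = 1;
 *	rss_desc->queue[0] = queue_conf->index;	// hypothetical conf
 *	hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx);
 *	if (!hrxq)
 *		return -rte_errno;
 *	// Drop the reference with mlx5_hrxq_release(dev, hrxq_idx).
 */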
10984
10985 /**
10986  * Release sample sub action resource.
10987  *
10988  * @param[in, out] dev
10989  *   Pointer to rte_eth_dev structure.
10990  * @param[in] act_res
10991  *   Pointer to sample sub action resource.
10992  */
10993 static void
10994 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10995                                    struct mlx5_flow_sub_actions_idx *act_res)
10996 {
10997         if (act_res->rix_hrxq) {
10998                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10999                 act_res->rix_hrxq = 0;
11000         }
11001         if (act_res->rix_encap_decap) {
11002                 flow_dv_encap_decap_resource_release(dev,
11003                                                      act_res->rix_encap_decap);
11004                 act_res->rix_encap_decap = 0;
11005         }
11006         if (act_res->rix_port_id_action) {
11007                 flow_dv_port_id_action_resource_release(dev,
11008                                                 act_res->rix_port_id_action);
11009                 act_res->rix_port_id_action = 0;
11010         }
11011         if (act_res->rix_tag) {
11012                 flow_dv_tag_release(dev, act_res->rix_tag);
11013                 act_res->rix_tag = 0;
11014         }
11015         if (act_res->rix_jump) {
11016                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
11017                 act_res->rix_jump = 0;
11018         }
11019 }
11020
11021 int
11022 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11023                         struct mlx5_list_entry *entry, void *cb_ctx)
11024 {
11025         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11026         struct rte_eth_dev *dev = ctx->dev;
11027         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11028         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11029                                                               typeof(*resource),
11030                                                               entry);
11031
11032         if (ctx_resource->ratio == resource->ratio &&
11033             ctx_resource->ft_type == resource->ft_type &&
11034             ctx_resource->ft_id == resource->ft_id &&
11035             ctx_resource->set_action == resource->set_action &&
11036             !memcmp((void *)&ctx_resource->sample_act,
11037                     (void *)&resource->sample_act,
11038                     sizeof(struct mlx5_flow_sub_actions_list))) {
11039                 /*
11040                  * A matching sample action already exists, release the
11041                  * prepared sub-actions' reference counters.
11042                  */
11043                 flow_dv_sample_sub_actions_release(dev,
11044                                                    &ctx_resource->sample_idx);
11045                 return 0;
11046         }
11047         return 1;
11048 }
11049
11050 struct mlx5_list_entry *
11051 flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11052 {
11053         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11054         struct rte_eth_dev *dev = ctx->dev;
11055         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11056         void **sample_dv_actions = ctx_resource->sub_actions;
11057         struct mlx5_flow_dv_sample_resource *resource;
11058         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
11059         struct mlx5_priv *priv = dev->data->dev_private;
11060         struct mlx5_dev_ctx_shared *sh = priv->sh;
11061         struct mlx5_flow_tbl_resource *tbl;
11062         uint32_t idx = 0;
11063         const uint32_t next_ft_step = 1;
11064         uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
11065         uint8_t is_egress = 0;
11066         uint8_t is_transfer = 0;
11067         struct rte_flow_error *error = ctx->error;
11068
11069         /* Register new sample resource. */
11070         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11071         if (!resource) {
11072                 rte_flow_error_set(error, ENOMEM,
11073                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11074                                           NULL,
11075                                           "cannot allocate resource memory");
11076                 return NULL;
11077         }
11078         *resource = *ctx_resource;
11079         /* Create normal path table level */
11080         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11081                 is_transfer = 1;
11082         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
11083                 is_egress = 1;
11084         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
11085                                         is_egress, is_transfer,
11086                                         true, NULL, 0, 0, 0, error);
11087         if (!tbl) {
11088                 rte_flow_error_set(error, ENOMEM,
11089                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11090                                           NULL,
11091                                           "failed to create normal path table "
11092                                           "for sample");
11093                 goto error;
11094         }
11095         resource->normal_path_tbl = tbl;
11096         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11097                 if (!sh->default_miss_action) {
11098                         rte_flow_error_set(error, ENOMEM,
11099                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11100                                                 NULL,
11101                                                 "default miss action was not "
11102                                                 "created");
11103                         goto error;
11104                 }
11105                 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11106                                                 sh->default_miss_action;
11107         }
11108         /* Create a DR sample action */
11109         sampler_attr.sample_ratio = resource->ratio;
11110         sampler_attr.default_next_table = tbl->obj;
11111         sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11112         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11113                                                         &sample_dv_actions[0];
11114         sampler_attr.action = resource->set_action;
11115         if (mlx5_os_flow_dr_create_flow_action_sampler
11116                         (&sampler_attr, &resource->verbs_action)) {
11117                 rte_flow_error_set(error, ENOMEM,
11118                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11119                                         NULL, "cannot create sample action");
11120                 goto error;
11121         }
11122         resource->idx = idx;
11123         resource->dev = dev;
11124         return &resource->entry;
11125 error:
11126         if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11127                 flow_dv_sample_sub_actions_release(dev,
11128                                                    &resource->sample_idx);
11129         if (resource->normal_path_tbl)
11130                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11131                                 resource->normal_path_tbl);
11132         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11133         return NULL;
11135 }
11136
11137 struct mlx5_list_entry *
11138 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11139                          struct mlx5_list_entry *entry __rte_unused,
11140                          void *cb_ctx)
11141 {
11142         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11143         struct rte_eth_dev *dev = ctx->dev;
11144         struct mlx5_flow_dv_sample_resource *resource;
11145         struct mlx5_priv *priv = dev->data->dev_private;
11146         struct mlx5_dev_ctx_shared *sh = priv->sh;
11147         uint32_t idx = 0;
11148
11149         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11150         if (!resource) {
11151                 rte_flow_error_set(ctx->error, ENOMEM,
11152                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11153                                           NULL,
11154                                           "cannot allocate resource memory");
11155                 return NULL;
11156         }
11157         memcpy(resource, entry, sizeof(*resource));
11158         resource->idx = idx;
11159         resource->dev = dev;
11160         return &resource->entry;
11161 }
11162
11163 void
11164 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11165                              struct mlx5_list_entry *entry)
11166 {
11167         struct mlx5_flow_dv_sample_resource *resource =
11168                                   container_of(entry, typeof(*resource), entry);
11169         struct rte_eth_dev *dev = resource->dev;
11170         struct mlx5_priv *priv = dev->data->dev_private;
11171
11172         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11173 }
11174
11175 /**
11176  * Find existing sample resource or create and register a new one.
11177  *
11178  * @param[in, out] dev
11179  *   Pointer to rte_eth_dev structure.
11180  * @param[in] ref
11181  *   Pointer to sample resource reference.
11182  * @param[in, out] dev_flow
11183  *   Pointer to the dev_flow.
11184  * @param[out] error
11185  *   Pointer to the error structure.
11186  *
11187  * @return
11188  *   0 on success, a negative errno value otherwise and rte_errno is set.
11189  */
11190 static int
11191 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11192                          struct mlx5_flow_dv_sample_resource *ref,
11193                          struct mlx5_flow *dev_flow,
11194                          struct rte_flow_error *error)
11195 {
11196         struct mlx5_flow_dv_sample_resource *resource;
11197         struct mlx5_list_entry *entry;
11198         struct mlx5_priv *priv = dev->data->dev_private;
11199         struct mlx5_flow_cb_ctx ctx = {
11200                 .dev = dev,
11201                 .error = error,
11202                 .data = ref,
11203         };
11204
11205         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11206         if (!entry)
11207                 return -rte_errno;
11208         resource = container_of(entry, typeof(*resource), entry);
11209         dev_flow->handle->dvh.rix_sample = resource->idx;
11210         dev_flow->dv.sample_res = resource;
11211         return 0;
11212 }
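
/*
 * Usage sketch (as done by flow_dv_create_action_sample() below): fill the
 * reference with the translated sub-actions and register it; on a match
 * the prepared sub-action references are released by the match callback.
 *
 *	res->sub_actions = sample_actions;
 *	if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
 *		return -rte_errno;
 *	// dev_flow->dv.sample_res->verbs_action is then ready for use.
 */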
11213
11214 int
11215 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11216                             struct mlx5_list_entry *entry, void *cb_ctx)
11217 {
11218         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11219         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11220         struct rte_eth_dev *dev = ctx->dev;
11221         struct mlx5_flow_dv_dest_array_resource *resource =
11222                                   container_of(entry, typeof(*resource), entry);
11223         uint32_t idx = 0;
11224
11225         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11226             ctx_resource->ft_type == resource->ft_type &&
11227             !memcmp((void *)resource->sample_act,
11228                     (void *)ctx_resource->sample_act,
11229                    (ctx_resource->num_of_dest *
11230                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11231                 /*
11232                  * A matching dest array action already exists, release
11233                  * the prepared sub-actions' reference counters.
11234                  */
11235                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11236                         flow_dv_sample_sub_actions_release(dev,
11237                                         &ctx_resource->sample_idx[idx]);
11238                 return 0;
11239         }
11240         return 1;
11241 }
11242
11243 struct mlx5_list_entry *
11244 flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11245 {
11246         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11247         struct rte_eth_dev *dev = ctx->dev;
11248         struct mlx5_flow_dv_dest_array_resource *resource;
11249         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11250         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11251         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11252         struct mlx5_priv *priv = dev->data->dev_private;
11253         struct mlx5_dev_ctx_shared *sh = priv->sh;
11254         struct mlx5_flow_sub_actions_list *sample_act;
11255         struct mlx5dv_dr_domain *domain;
11256         uint32_t idx = 0, res_idx = 0;
11257         struct rte_flow_error *error = ctx->error;
11258         uint64_t action_flags;
11259         int ret;
11260
11261         /* Register new destination array resource. */
11262         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11263                                             &res_idx);
11264         if (!resource) {
11265                 rte_flow_error_set(error, ENOMEM,
11266                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11267                                           NULL,
11268                                           "cannot allocate resource memory");
11269                 return NULL;
11270         }
11271         *resource = *ctx_resource;
11272         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11273                 domain = sh->fdb_domain;
11274         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11275                 domain = sh->rx_domain;
11276         else
11277                 domain = sh->tx_domain;
11278         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11279                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11280                                  mlx5_malloc(MLX5_MEM_ZERO,
11281                                  sizeof(struct mlx5dv_dr_action_dest_attr),
11282                                  0, SOCKET_ID_ANY);
11283                 if (!dest_attr[idx]) {
11284                         rte_flow_error_set(error, ENOMEM,
11285                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11286                                            NULL,
11287                                            "cannot allocate resource memory");
11288                         goto error;
11289                 }
11290                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11291                 sample_act = &ctx_resource->sample_act[idx];
11292                 action_flags = sample_act->action_flags;
11293                 switch (action_flags) {
11294                 case MLX5_FLOW_ACTION_QUEUE:
11295                         dest_attr[idx]->dest = sample_act->dr_queue_action;
11296                         break;
11297                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11298                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11299                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11300                         dest_attr[idx]->dest_reformat->reformat =
11301                                         sample_act->dr_encap_action;
11302                         dest_attr[idx]->dest_reformat->dest =
11303                                         sample_act->dr_port_id_action;
11304                         break;
11305                 case MLX5_FLOW_ACTION_PORT_ID:
11306                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
11307                         break;
11308                 case MLX5_FLOW_ACTION_JUMP:
11309                         dest_attr[idx]->dest = sample_act->dr_jump_action;
11310                         break;
11311                 default:
11312                         rte_flow_error_set(error, EINVAL,
11313                                            RTE_FLOW_ERROR_TYPE_ACTION,
11314                                            NULL,
11315                                            "unsupported actions type");
11316                         goto error;
11317                 }
11318         }
11319         /* Create a dest array action. */
11320         ret = mlx5_os_flow_dr_create_flow_action_dest_array
11321                                                 (domain,
11322                                                  resource->num_of_dest,
11323                                                  dest_attr,
11324                                                  &resource->action);
11325         if (ret) {
11326                 rte_flow_error_set(error, ENOMEM,
11327                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11328                                    NULL,
11329                                    "cannot create destination array action");
11330                 goto error;
11331         }
11332         resource->idx = res_idx;
11333         resource->dev = dev;
11334         for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11335                 mlx5_free(dest_attr[idx]);
11336         return &resource->entry;
11337 error:
11338         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11339                 flow_dv_sample_sub_actions_release(dev,
11340                                                    &resource->sample_idx[idx]);
11341                 if (dest_attr[idx])
11342                         mlx5_free(dest_attr[idx]);
11343         }
11344         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11345         return NULL;
11346 }
11347
11348 struct mlx5_list_entry *
11349 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11350                             struct mlx5_list_entry *entry __rte_unused,
11351                             void *cb_ctx)
11352 {
11353         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11354         struct rte_eth_dev *dev = ctx->dev;
11355         struct mlx5_flow_dv_dest_array_resource *resource;
11356         struct mlx5_priv *priv = dev->data->dev_private;
11357         struct mlx5_dev_ctx_shared *sh = priv->sh;
11358         uint32_t res_idx = 0;
11359         struct rte_flow_error *error = ctx->error;
11360
11361         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11362                                       &res_idx);
11363         if (!resource) {
11364                 rte_flow_error_set(error, ENOMEM,
11365                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11366                                           NULL,
11367                                           "cannot allocate dest-array memory");
11368                 return NULL;
11369         }
11370         memcpy(resource, entry, sizeof(*resource));
11371         resource->idx = res_idx;
11372         resource->dev = dev;
11373         return &resource->entry;
11374 }
11375
11376 void
11377 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11378                                  struct mlx5_list_entry *entry)
11379 {
11380         struct mlx5_flow_dv_dest_array_resource *resource =
11381                         container_of(entry, typeof(*resource), entry);
11382         struct rte_eth_dev *dev = resource->dev;
11383         struct mlx5_priv *priv = dev->data->dev_private;
11384
11385         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11386 }
11387
11388 /**
11389  * Find existing destination array resource or create and register a new one.
11390  *
11391  * @param[in, out] dev
11392  *   Pointer to rte_eth_dev structure.
11393  * @param[in] ref
11394  *   Pointer to destination array resource reference.
11395  * @param[in, out] dev_flow
11396  *   Pointer to the dev_flow.
11397  * @param[out] error
11398  *   Pointer to the error structure.
11399  *
11400  * @return
11401  *   0 on success, a negative errno value otherwise and rte_errno is set.
11402  */
11403 static int
11404 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11405                          struct mlx5_flow_dv_dest_array_resource *ref,
11406                          struct mlx5_flow *dev_flow,
11407                          struct rte_flow_error *error)
11408 {
11409         struct mlx5_flow_dv_dest_array_resource *resource;
11410         struct mlx5_priv *priv = dev->data->dev_private;
11411         struct mlx5_list_entry *entry;
11412         struct mlx5_flow_cb_ctx ctx = {
11413                 .dev = dev,
11414                 .error = error,
11415                 .data = ref,
11416         };
11417
11418         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11419         if (!entry)
11420                 return -rte_errno;
11421         resource = container_of(entry, typeof(*resource), entry);
11422         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11423         dev_flow->dv.dest_array_res = resource;
11424         return 0;
11425 }
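
/*
 * Usage sketch (mirroring flow_dv_create_action_sample() below): for
 * mirroring, the sample and normal path sub-actions are copied into the
 * reference before registering.
 *
 *	mdest_res->ft_type = res->ft_type;
 *	mdest_res->num_of_dest = num_of_dest;
 *	if (flow_dv_dest_array_resource_register(dev, mdest_res,
 *						 dev_flow, error))
 *		return -rte_errno;
 */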
11426
11427 /**
11428  * Convert Sample action to DV specification.
11429  *
11430  * @param[in] dev
11431  *   Pointer to rte_eth_dev structure.
11432  * @param[in] action
11433  *   Pointer to sample action structure.
11434  * @param[in, out] dev_flow
11435  *   Pointer to the mlx5_flow.
11436  * @param[in] attr
11437  *   Pointer to the flow attributes.
11438  * @param[in, out] num_of_dest
11439  *   Pointer to the num of destination.
11440  * @param[in, out] sample_actions
11441  *   Pointer to sample actions list.
11442  * @param[in, out] res
11443  *   Pointer to sample resource.
11444  * @param[out] error
11445  *   Pointer to the error structure.
11446  *
11447  * @return
11448  *   0 on success, a negative errno value otherwise and rte_errno is set.
11449  */
11450 static int
11451 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11452                                 const struct rte_flow_action_sample *action,
11453                                 struct mlx5_flow *dev_flow,
11454                                 const struct rte_flow_attr *attr,
11455                                 uint32_t *num_of_dest,
11456                                 void **sample_actions,
11457                                 struct mlx5_flow_dv_sample_resource *res,
11458                                 struct rte_flow_error *error)
11459 {
11460         struct mlx5_priv *priv = dev->data->dev_private;
11461         const struct rte_flow_action *sub_actions;
11462         struct mlx5_flow_sub_actions_list *sample_act;
11463         struct mlx5_flow_sub_actions_idx *sample_idx;
11464         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11465         struct rte_flow *flow = dev_flow->flow;
11466         struct mlx5_flow_rss_desc *rss_desc;
11467         uint64_t action_flags = 0;
11468
11469         MLX5_ASSERT(wks);
11470         rss_desc = &wks->rss_desc;
11471         sample_act = &res->sample_act;
11472         sample_idx = &res->sample_idx;
11473         res->ratio = action->ratio;
11474         sub_actions = action->actions;
11475         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11476                 int type = sub_actions->type;
11477                 uint32_t pre_rix = 0;
11478                 void *pre_r;
11479                 switch (type) {
11480                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11481                 {
11482                         const struct rte_flow_action_queue *queue;
11483                         struct mlx5_hrxq *hrxq;
11484                         uint32_t hrxq_idx;
11485
11486                         queue = sub_actions->conf;
11487                         rss_desc->queue_num = 1;
11488                         rss_desc->queue[0] = queue->index;
11489                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11490                                                     rss_desc, &hrxq_idx);
11491                         if (!hrxq)
11492                                 return rte_flow_error_set
11493                                         (error, rte_errno,
11494                                          RTE_FLOW_ERROR_TYPE_ACTION,
11495                                          NULL,
11496                                          "cannot create fate queue");
11497                         sample_act->dr_queue_action = hrxq->action;
11498                         sample_idx->rix_hrxq = hrxq_idx;
11499                         sample_actions[sample_act->actions_num++] =
11500                                                 hrxq->action;
11501                         (*num_of_dest)++;
11502                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11503                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11504                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11505                         dev_flow->handle->fate_action =
11506                                         MLX5_FLOW_FATE_QUEUE;
11507                         break;
11508                 }
11509                 case RTE_FLOW_ACTION_TYPE_RSS:
11510                 {
11511                         struct mlx5_hrxq *hrxq;
11512                         uint32_t hrxq_idx;
11513                         const struct rte_flow_action_rss *rss;
11514                         const uint8_t *rss_key;
11515
11516                         rss = sub_actions->conf;
11517                         memcpy(rss_desc->queue, rss->queue,
11518                                rss->queue_num * sizeof(uint16_t));
11519                         rss_desc->queue_num = rss->queue_num;
11520                         /* NULL RSS key indicates default RSS key. */
11521                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11522                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11523                         /*
11524                          * rss->level and rss->types should be set in advance
11525                          * when expanding items for RSS.
11526                          */
11527                         flow_dv_hashfields_set(dev_flow, rss_desc);
11528                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11529                                                     rss_desc, &hrxq_idx);
11530                         if (!hrxq)
11531                                 return rte_flow_error_set
11532                                         (error, rte_errno,
11533                                          RTE_FLOW_ERROR_TYPE_ACTION,
11534                                          NULL,
11535                                          "cannot create fate queue");
11536                         sample_act->dr_queue_action = hrxq->action;
11537                         sample_idx->rix_hrxq = hrxq_idx;
11538                         sample_actions[sample_act->actions_num++] =
11539                                                 hrxq->action;
11540                         (*num_of_dest)++;
11541                         action_flags |= MLX5_FLOW_ACTION_RSS;
11542                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11543                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11544                         dev_flow->handle->fate_action =
11545                                         MLX5_FLOW_FATE_QUEUE;
11546                         break;
11547                 }
11548                 case RTE_FLOW_ACTION_TYPE_MARK:
11549                 {
11550                         uint32_t tag_be = mlx5_flow_mark_set
11551                                 (((const struct rte_flow_action_mark *)
11552                                 (sub_actions->conf))->id);
11553
11554                         dev_flow->handle->mark = 1;
11555                         pre_rix = dev_flow->handle->dvh.rix_tag;
11556                         /* Save the mark resource before sample */
11557                         pre_r = dev_flow->dv.tag_resource;
11558                         if (flow_dv_tag_resource_register(dev, tag_be,
11559                                                   dev_flow, error))
11560                                 return -rte_errno;
11561                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11562                         sample_act->dr_tag_action =
11563                                 dev_flow->dv.tag_resource->action;
11564                         sample_idx->rix_tag =
11565                                 dev_flow->handle->dvh.rix_tag;
11566                         sample_actions[sample_act->actions_num++] =
11567                                                 sample_act->dr_tag_action;
11568                         /* Recover the mark resource after sample */
11569                         dev_flow->dv.tag_resource = pre_r;
11570                         dev_flow->handle->dvh.rix_tag = pre_rix;
11571                         action_flags |= MLX5_FLOW_ACTION_MARK;
11572                         break;
11573                 }
11574                 case RTE_FLOW_ACTION_TYPE_COUNT:
11575                 {
11576                         if (!flow->counter) {
11577                                 flow->counter =
11578                                         flow_dv_translate_create_counter(dev,
11579                                                 dev_flow, sub_actions->conf,
11580                                                 0);
11581                                 if (!flow->counter)
11582                                         return rte_flow_error_set
11583                                                 (error, rte_errno,
11584                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11585                                                 NULL,
11586                                                 "cannot create counter"
11587                                                 " object.");
11588                         }
11589                         sample_act->dr_cnt_action =
11590                                   (flow_dv_counter_get_by_idx(dev,
11591                                   flow->counter, NULL))->action;
11592                         sample_actions[sample_act->actions_num++] =
11593                                                 sample_act->dr_cnt_action;
11594                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11595                         break;
11596                 }
11597                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11598                 {
11599                         struct mlx5_flow_dv_port_id_action_resource
11600                                         port_id_resource;
11601                         uint32_t port_id = 0;
11602
11603                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11604                         /* Save the port id resource before sample */
11605                         pre_rix = dev_flow->handle->rix_port_id_action;
11606                         pre_r = dev_flow->dv.port_id_action;
11607                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11608                                                              &port_id, error))
11609                                 return -rte_errno;
11610                         port_id_resource.port_id = port_id;
11611                         if (flow_dv_port_id_action_resource_register
11612                             (dev, &port_id_resource, dev_flow, error))
11613                                 return -rte_errno;
11614                         sample_act->dr_port_id_action =
11615                                 dev_flow->dv.port_id_action->action;
11616                         sample_idx->rix_port_id_action =
11617                                 dev_flow->handle->rix_port_id_action;
11618                         sample_actions[sample_act->actions_num++] =
11619                                                 sample_act->dr_port_id_action;
11620                         /* Recover the port id resource after sample */
11621                         dev_flow->dv.port_id_action = pre_r;
11622                         dev_flow->handle->rix_port_id_action = pre_rix;
11623                         (*num_of_dest)++;
11624                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11625                         break;
11626                 }
11627                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11628                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11629                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11630                         /* Save the encap resource before sample */
11631                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11632                         pre_r = dev_flow->dv.encap_decap;
11633                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11634                                                            dev_flow,
11635                                                            attr->transfer,
11636                                                            error))
11637                                 return -rte_errno;
11638                         sample_act->dr_encap_action =
11639                                 dev_flow->dv.encap_decap->action;
11640                         sample_idx->rix_encap_decap =
11641                                 dev_flow->handle->dvh.rix_encap_decap;
11642                         sample_actions[sample_act->actions_num++] =
11643                                                 sample_act->dr_encap_action;
11644                         /* Recover the encap resource after sample */
11645                         dev_flow->dv.encap_decap = pre_r;
11646                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11647                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11648                         break;
11649                 default:
11650                         return rte_flow_error_set(error, EINVAL,
11651                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11652                                 NULL,
11653                                 "unsupported sub-action for sampler");
11654                 }
11655         }
11656         sample_act->action_flags = action_flags;
11657         res->ft_id = dev_flow->dv.group;
11658         if (attr->transfer) {
11659                 union {
11660                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11661                         uint64_t set_action;
11662                 } action_ctx = { .set_action = 0 };
11663
11664                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11665                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11666                          MLX5_MODIFICATION_TYPE_SET);
11667                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11668                          MLX5_MODI_META_REG_C_0);
11669                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11670                          priv->vport_meta_tag);
11671                 res->set_action = action_ctx.set_action;
11672         } else if (attr->ingress) {
11673                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11674         } else {
11675                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11676         }
11677         return 0;
11678 }
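
/*
 * Application-level example (illustrative): a SAMPLE action copying 1 of
 * every 2 packets to queue 0; the sub-action list is terminated like a
 * regular action list.
 *
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action sub_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_sample sample = {
 *		.ratio = 2,
 *		.actions = sub_acts,
 *	};
 */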
11679
11680 /**
11681  * Create and register the DV sample or destination array resources.
11682  *
11683  * @param[in] dev
11684  *   Pointer to rte_eth_dev structure.
11685  * @param[in, out] dev_flow
11686  *   Pointer to the mlx5_flow.
11687  * @param[in] num_of_dest
11688  *   The num of destination.
11689  * @param[in, out] res
11690  *   Pointer to sample resource.
11691  * @param[in, out] mdest_res
11692  *   Pointer to destination array resource.
11693  * @param[in] sample_actions
11694  *   Pointer to sample path actions list.
11695  * @param[in] action_flags
11696  *   Holds the actions detected until now.
11697  * @param[out] error
11698  *   Pointer to the error structure.
11699  *
11700  * @return
11701  *   0 on success, a negative errno value otherwise and rte_errno is set.
11702  */
11703 static int
11704 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11705                              struct mlx5_flow *dev_flow,
11706                              uint32_t num_of_dest,
11707                              struct mlx5_flow_dv_sample_resource *res,
11708                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11709                              void **sample_actions,
11710                              uint64_t action_flags,
11711                              struct rte_flow_error *error)
11712 {
11713         /* update normal path action resource into last index of array */
11714         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11715         struct mlx5_flow_sub_actions_list *sample_act =
11716                                         &mdest_res->sample_act[dest_index];
11717         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11718         struct mlx5_flow_rss_desc *rss_desc;
11719         uint32_t normal_idx = 0;
11720         struct mlx5_hrxq *hrxq;
11721         uint32_t hrxq_idx;
11722
11723         MLX5_ASSERT(wks);
11724         rss_desc = &wks->rss_desc;
11725         if (num_of_dest > 1) {
11726                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11727                         /* Handle QP action for mirroring */
11728                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11729                                                     rss_desc, &hrxq_idx);
11730                         if (!hrxq)
11731                                 return rte_flow_error_set
11732                                      (error, rte_errno,
11733                                       RTE_FLOW_ERROR_TYPE_ACTION,
11734                                       NULL,
11735                                       "cannot create rx queue");
11736                         normal_idx++;
11737                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11738                         sample_act->dr_queue_action = hrxq->action;
11739                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11740                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11741                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11742                 }
11743                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11744                         normal_idx++;
11745                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11746                                 dev_flow->handle->dvh.rix_encap_decap;
11747                         sample_act->dr_encap_action =
11748                                 dev_flow->dv.encap_decap->action;
11749                         dev_flow->handle->dvh.rix_encap_decap = 0;
11750                 }
11751                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11752                         normal_idx++;
11753                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11754                                 dev_flow->handle->rix_port_id_action;
11755                         sample_act->dr_port_id_action =
11756                                 dev_flow->dv.port_id_action->action;
11757                         dev_flow->handle->rix_port_id_action = 0;
11758                 }
11759                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11760                         normal_idx++;
11761                         mdest_res->sample_idx[dest_index].rix_jump =
11762                                 dev_flow->handle->rix_jump;
11763                         sample_act->dr_jump_action =
11764                                 dev_flow->dv.jump->action;
11765                         dev_flow->handle->rix_jump = 0;
11766                 }
11767                 sample_act->actions_num = normal_idx;
11768                 /* update sample action resource into first index of array */
11769                 mdest_res->ft_type = res->ft_type;
11770                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11771                                 sizeof(struct mlx5_flow_sub_actions_idx));
11772                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11773                                 sizeof(struct mlx5_flow_sub_actions_list));
11774                 mdest_res->num_of_dest = num_of_dest;
11775                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11776                                                          dev_flow, error))
11777                         return rte_flow_error_set(error, EINVAL,
11778                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11779                                                   NULL, "can't create sample "
11780                                                   "action");
11781         } else {
11782                 res->sub_actions = sample_actions;
11783                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11784                         return rte_flow_error_set(error, EINVAL,
11785                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11786                                                   NULL,
11787                                                   "can't create sample action");
11788         }
11789         return 0;
11790 }
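
/*
 * Note: when num_of_dest exceeds 1 (typically a ratio == 1 sample used
 * for mirroring together with a fate action), the destination array path
 * above is taken instead of a plain sampler resource.
 */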
11791
11792 /**
11793  * Remove an ASO age action from age actions list.
11794  *
11795  * @param[in] dev
11796  *   Pointer to the Ethernet device structure.
11797  * @param[in] age
11798  *   Pointer to the aso age action handler.
11799  */
11800 static void
11801 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11802                                 struct mlx5_aso_age_action *age)
11803 {
11804         struct mlx5_age_info *age_info;
11805         struct mlx5_age_param *age_param = &age->age_params;
11806         struct mlx5_priv *priv = dev->data->dev_private;
11807         uint16_t expected = AGE_CANDIDATE;
11808
11809         age_info = GET_PORT_AGE_INFO(priv);
11810         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11811                                          AGE_FREE, false, __ATOMIC_RELAXED,
11812                                          __ATOMIC_RELAXED)) {
11813                 /*
11814                  * The lock is needed even on age timeout, since the
11815                  * age action may still be in process.
11816                  */
11817                 rte_spinlock_lock(&age_info->aged_sl);
11818                 LIST_REMOVE(age, next);
11819                 rte_spinlock_unlock(&age_info->aged_sl);
11820                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11821         }
11822 }
11823
11824 /**
11825  * Release an ASO age action.
11826  *
11827  * @param[in] dev
11828  *   Pointer to the Ethernet device structure.
11829  * @param[in] age_idx
11830  *   Index of ASO age action to release.
11834  *
11835  * @return
11836  *   0 when age action was removed, otherwise the number of references.
11837  */
11838 static int
11839 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11840 {
11841         struct mlx5_priv *priv = dev->data->dev_private;
11842         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11843         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11844         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11845
11846         if (!ret) {
11847                 flow_dv_aso_age_remove_from_age(dev, age);
11848                 rte_spinlock_lock(&mng->free_sl);
11849                 LIST_INSERT_HEAD(&mng->free, age, next);
11850                 rte_spinlock_unlock(&mng->free_sl);
11851         }
11852         return ret;
11853 }
11854
11855 /**
11856  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11857  *
11858  * @param[in] dev
11859  *   Pointer to the Ethernet device structure.
11860  *
11861  * @return
11862  *   0 on success, otherwise negative errno value and rte_errno is set.
11863  */
11864 static int
11865 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11866 {
11867         struct mlx5_priv *priv = dev->data->dev_private;
11868         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11869         void *old_pools = mng->pools;
11870         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11871         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11872         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11873
11874         if (!pools) {
11875                 rte_errno = ENOMEM;
11876                 return -ENOMEM;
11877         }
11878         if (old_pools) {
11879                 memcpy(pools, old_pools,
11880                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11881                 mlx5_free(old_pools);
11882         } else {
11883                 /* First ASO flow hit allocation - starting ASO data-path. */
11884                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11885
11886                 if (ret) {
11887                         mlx5_free(pools);
11888                         return ret;
11889                 }
11890         }
11891         mng->n = resize;
11892         mng->pools = pools;
11893         return 0;
11894 }
11895
11896 /**
11897  * Create and initialize a new ASO aging pool.
11898  *
11899  * @param[in] dev
11900  *   Pointer to the Ethernet device structure.
11901  * @param[out] age_free
11902  *   Where to put the pointer of a new age action.
11903  *
11904  * @return
11905  *   The age actions pool pointer and @p age_free is set on success,
11906  *   NULL otherwise and rte_errno is set.
11907  */
11908 static struct mlx5_aso_age_pool *
11909 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11910                         struct mlx5_aso_age_action **age_free)
11911 {
11912         struct mlx5_priv *priv = dev->data->dev_private;
11913         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11914         struct mlx5_aso_age_pool *pool = NULL;
11915         struct mlx5_devx_obj *obj = NULL;
11916         uint32_t i;
11917
11918         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11919                                                     priv->sh->pdn);
11920         if (!obj) {
11921                 rte_errno = ENODATA;
11922                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11923                 return NULL;
11924         }
11925         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11926         if (!pool) {
11927                 claim_zero(mlx5_devx_cmd_destroy(obj));
11928                 rte_errno = ENOMEM;
11929                 return NULL;
11930         }
11931         pool->flow_hit_aso_obj = obj;
11932         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11933         rte_spinlock_lock(&mng->resize_sl);
11934         pool->index = mng->next;
11935         /* Resize pools array if there is no room for the new pool in it. */
11936         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11937                 claim_zero(mlx5_devx_cmd_destroy(obj));
11938                 mlx5_free(pool);
11939                 rte_spinlock_unlock(&mng->resize_sl);
11940                 return NULL;
11941         }
11942         mng->pools[pool->index] = pool;
11943         mng->next++;
11944         rte_spinlock_unlock(&mng->resize_sl);
11945         /* Assign the first action in the new pool, the rest go to free list. */
11946         *age_free = &pool->actions[0];
11947         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11948                 pool->actions[i].offset = i;
11949                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11950         }
11951         return pool;
11952 }
11953
11954 /**
11955  * Allocate an ASO aging bit.
11956  *
11957  * @param[in] dev
11958  *   Pointer to the Ethernet device structure.
11959  * @param[out] error
11960  *   Pointer to the error structure.
11961  *
11962  * @return
11963  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11964  */
11965 static uint32_t
11966 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11967 {
11968         struct mlx5_priv *priv = dev->data->dev_private;
11969         const struct mlx5_aso_age_pool *pool;
11970         struct mlx5_aso_age_action *age_free = NULL;
11971         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11972
11973         MLX5_ASSERT(mng);
11974         /* Try to get the next free age action bit. */
11975         rte_spinlock_lock(&mng->free_sl);
11976         age_free = LIST_FIRST(&mng->free);
11977         if (age_free) {
11978                 LIST_REMOVE(age_free, next);
11979         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11980                 rte_spinlock_unlock(&mng->free_sl);
11981                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11982                                    NULL, "failed to create ASO age pool");
11983                 return 0; /* 0 is an error. */
11984         }
11985         rte_spinlock_unlock(&mng->free_sl);
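              /*
               * age_free->offset is the index of the action inside its pool,
               * so (age_free - age_free->offset) points back at actions[0]
               * and container_of() recovers the enclosing pool object.
               */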
11986         pool = container_of
11987           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11988                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11989                                                                        actions);
11990         if (!age_free->dr_action) {
11991                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11992                                                  error);
11993
11994                 if (reg_c < 0) {
11995                         rte_flow_error_set(error, rte_errno,
11996                                            RTE_FLOW_ERROR_TYPE_ACTION,
11997                                            NULL, "failed to get reg_c "
11998                                            "for ASO flow hit");
11999                         return 0; /* 0 is an error. */
12000                 }
12001 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
12002                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
12003                                 (priv->sh->rx_domain,
12004                                  pool->flow_hit_aso_obj->obj, age_free->offset,
12005                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
12006                                  (reg_c - REG_C_0));
12007 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
12008                 if (!age_free->dr_action) {
12009                         rte_errno = errno;
12010                         rte_spinlock_lock(&mng->free_sl);
12011                         LIST_INSERT_HEAD(&mng->free, age_free, next);
12012                         rte_spinlock_unlock(&mng->free_sl);
12013                         rte_flow_error_set(error, rte_errno,
12014                                            RTE_FLOW_ERROR_TYPE_ACTION,
12015                                            NULL, "failed to create ASO "
12016                                            "flow hit action");
12017                         return 0; /* 0 is an error. */
12018                 }
12019         }
12020         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
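              /*
               * Pack the pool index in the low 16 bits and (offset + 1) above
               * them, so a valid handle is never 0; 0 is reserved for errors.
               * The matching decode is: pool = idx & 0xffff,
               * offset = (idx >> 16) - 1.
               */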
12021         return pool->index | ((age_free->offset + 1) << 16);
12022 }
12023
12024 /**
12025  * Initialize flow ASO age parameters.
12026  *
12027  * @param[in] dev
12028  *   Pointer to rte_eth_dev structure.
12029  * @param[in] age_idx
12030  *   Index of ASO age action.
12031  * @param[in] context
12032  *   Pointer to flow counter age context.
12033  * @param[in] timeout
12034  *   Aging timeout in seconds.
12035  *
12036  */
12037 static void
12038 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12039                             uint32_t age_idx,
12040                             void *context,
12041                             uint32_t timeout)
12042 {
12043         struct mlx5_aso_age_action *aso_age;
12044
12045         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12046         MLX5_ASSERT(aso_age);
12047         aso_age->age_params.context = context;
12048         aso_age->age_params.timeout = timeout;
12049         aso_age->age_params.port_id = dev->data->port_id;
12050         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12051                          __ATOMIC_RELAXED);
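              /*
               * Publish the state last: once AGE_CANDIDATE is visible, the
               * ASO aging data-path may start processing this entry, so the
               * other parameters must already be in place.
               */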
12052         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12053                          __ATOMIC_RELAXED);
12054 }
12055
12056 static void
12057 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12058                                const struct rte_flow_item_integrity *value,
12059                                void *headers_m, void *headers_v)
12060 {
12061         if (mask->l4_ok) {
12062                 /* The application l4_ok filter aggregates all hardware l4
12063                  * filters, therefore hw l4_checksum_ok is implicitly added here.
12064                  */
12065                 struct rte_flow_item_integrity local_item;
12066
12067                 local_item.l4_csum_ok = 1;
12068                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
12069                          local_item.l4_csum_ok);
12070                 if (value->l4_ok) {
12071                         /* An application l4_ok = 1 match sets both hw
12072                          * flags l4_ok and l4_checksum_ok to 1.
12073                          */
12074                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12075                                  l4_checksum_ok, local_item.l4_csum_ok);
12076                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
12077                                  mask->l4_ok);
12078                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
12079                                  value->l4_ok);
12080                 } else {
12081                         /* An application l4_ok = 0 match relies on hw
12082                          * flag l4_checksum_ok = 0 only.
12083                          */
12084                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12085                                  l4_checksum_ok, 0);
12086                 }
12087         } else if (mask->l4_csum_ok) {
12088                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
12089                          mask->l4_csum_ok);
12090                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12091                          value->l4_csum_ok);
12092         }
12093 }
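      /*
       * Summary of the translation above (integrity item -> hw fields):
       *   l4_ok = 1         -> match l4_ok = 1 and l4_checksum_ok = 1
       *   l4_ok = 0         -> match l4_checksum_ok = 0 only
       *   l4_csum_ok alone  -> match l4_checksum_ok as requested
       */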
12094
12095 static void
12096 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12097                                const struct rte_flow_item_integrity *value,
12098                                void *headers_m, void *headers_v,
12099                                bool is_ipv4)
12100 {
12101         if (mask->l3_ok) {
12102                 /* The application l3_ok filter aggregates all hardware l3
12103                  * filters, therefore hw ipv4_checksum_ok is implicitly added here.
12104                  */
12105                 struct rte_flow_item_integrity local_item;
12106
12107                 local_item.ipv4_csum_ok = !!is_ipv4;
12108                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12109                          local_item.ipv4_csum_ok);
12110                 if (value->l3_ok) {
12111                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12112                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
12113                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
12114                                  mask->l3_ok);
12115                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12116                                  value->l3_ok);
12117                 } else {
12118                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12119                                  ipv4_checksum_ok, 0);
12120                 }
12121         } else if (mask->ipv4_csum_ok) {
12122                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12123                          mask->ipv4_csum_ok);
12124                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12125                          value->ipv4_csum_ok);
12126         }
12127 }
12128
12129 static void
12130 flow_dv_translate_item_integrity(void *matcher, void *key,
12131                                  const struct rte_flow_item *head_item,
12132                                  const struct rte_flow_item *integrity_item)
12133 {
12134         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12135         const struct rte_flow_item_integrity *value = integrity_item->spec;
12136         const struct rte_flow_item *tunnel_item, *end_item, *item;
12137         void *headers_m;
12138         void *headers_v;
12139         uint32_t l3_protocol;
12140
12141         if (!value)
12142                 return;
12143         if (!mask)
12144                 mask = &rte_flow_item_integrity_mask;
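              /* Levels 0 and 1 match the outer headers, level > 1 the inner ones. */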
12145         if (value->level > 1) {
12146                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12147                                          inner_headers);
12148                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12149         } else {
12150                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12151                                          outer_headers);
12152                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12153         }
12154         tunnel_item = mlx5_flow_find_tunnel_item(head_item);
12155         if (value->level > 1) {
12156                 /* The tunnel item was verified during item validation. */
12157                 item = tunnel_item;
12158                 end_item = mlx5_find_end_item(tunnel_item);
12159         } else {
12160                 item = head_item;
12161                 end_item = tunnel_item ? tunnel_item :
12162                            mlx5_find_end_item(integrity_item);
12163         }
12164         l3_protocol = mask->l3_ok ?
12165                       mlx5_flow_locate_proto_l3(&item, end_item) : 0;
12166         flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
12167                                        l3_protocol == RTE_ETHER_TYPE_IPV4);
12168         flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
12169 }
12170
12171 /**
12172  * Prepares a DV flow counter with the aging configuration.
12173  * Gets the counter by index when it exists, creates a new one otherwise.
12174  *
12175  * @param[in] dev
12176  *   Pointer to rte_eth_dev structure.
12177  * @param[in] dev_flow
12178  *   Pointer to the mlx5_flow (sub flow).
12179  * @param[in, out] flow
12180  *   Pointer to the parent rte_flow.
12181  * @param[in] count
12182  *   Pointer to the counter action configuration.
12183  * @param[in] age
12184  *   Pointer to the aging action configuration.
12185  * @param[out] error
12186  *   Pointer to the error structure.
12187  *
12188  * @return
12189  *   Pointer to the counter, NULL otherwise.
12190  */
12191 static struct mlx5_flow_counter *
12192 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12193                         struct mlx5_flow *dev_flow,
12194                         struct rte_flow *flow,
12195                         const struct rte_flow_action_count *count,
12196                         const struct rte_flow_action_age *age,
12197                         struct rte_flow_error *error)
12198 {
12199         if (!flow->counter) {
12200                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12201                                                                  count, age);
12202                 if (!flow->counter) {
12203                         rte_flow_error_set(error, rte_errno,
12204                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12205                                            "cannot create counter object.");
12206                         return NULL;
12207                 }
12208         }
12209         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12210 }
12211
12212 /**
12213  * Release an ASO CT action via its owner device.
12214  *
12215  * @param[in] dev
12216  *   Pointer to the Ethernet device structure.
12217  * @param[in] idx
12218  *   Index of ASO CT action to release.
12219  *
12220  * @return
12221  *   0 when removed, -1 when busy in the ASO SQ, else the reference count.
12222  */
12223 static inline int
12224 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12225 {
12226         struct mlx5_priv *priv = dev->data->dev_private;
12227         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12228         uint32_t ret;
12229         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12230         enum mlx5_aso_ct_state state =
12231                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12232
12233         /* Cannot release when CT is in the ASO SQ. */
12234         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12235                 return -1;
12236         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12237         if (!ret) {
12238                 if (ct->dr_action_orig) {
12239 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12240                         claim_zero(mlx5_glue->destroy_flow_action
12241                                         (ct->dr_action_orig));
12242 #endif
12243                         ct->dr_action_orig = NULL;
12244                 }
12245                 if (ct->dr_action_rply) {
12246 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12247                         claim_zero(mlx5_glue->destroy_flow_action
12248                                         (ct->dr_action_rply));
12249 #endif
12250                         ct->dr_action_rply = NULL;
12251                 }
12252                 /* Clear the state to free; not needed at the first allocation. */
12253                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12254                 rte_spinlock_lock(&mng->ct_sl);
12255                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12256                 rte_spinlock_unlock(&mng->ct_sl);
12257         }
12258         return (int)ret;
12259 }
12260
12261 static inline int
12262 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
12263 {
12264         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12265         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12266         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12268
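              /*
               * own_idx encodes both the owner port and the index inside the
               * owner's pools, so the action is released on the owner device.
               */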
12269         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12270         if (dev->data->dev_started != 1)
12271                 return -1;
12272         return flow_dv_aso_ct_dev_release(owndev, idx);
12273 }
12274
12275 /**
12276  * Resize the ASO CT pools array by 64 pools.
12277  *
12278  * @param[in] dev
12279  *   Pointer to the Ethernet device structure.
12280  *
12281  * @return
12282  *   0 on success, otherwise negative errno value and rte_errno is set.
12283  */
12284 static int
12285 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12286 {
12287         struct mlx5_priv *priv = dev->data->dev_private;
12288         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12289         void *old_pools = mng->pools;
12290         /* Magic number for now; this deserves a dedicated macro. */
12291         uint32_t resize = mng->n + 64;
12292         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12293         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12294
12295         if (!pools) {
12296                 rte_errno = ENOMEM;
12297                 return -rte_errno;
12298         }
12299         rte_rwlock_write_lock(&mng->resize_rwl);
12300         /* The ASO SQ/QP was already initialized at startup. */
12301         if (old_pools) {
12302                 /* Realloc could be an alternative choice. */
12303                 rte_memcpy(pools, old_pools,
12304                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12305                 mlx5_free(old_pools);
12306         }
12307         mng->n = resize;
12308         mng->pools = pools;
12309         rte_rwlock_write_unlock(&mng->resize_rwl);
12310         return 0;
12311 }
12312
12313 /**
12314  * Create and initialize a new ASO CT pool.
12315  *
12316  * @param[in] dev
12317  *   Pointer to the Ethernet device structure.
12318  * @param[out] ct_free
12319  *   Where to put the pointer of a new CT action.
12320  *
12321  * @return
12322  *   The CT actions pool pointer and @p ct_free is set on success,
12323  *   NULL otherwise and rte_errno is set.
12324  */
12325 static struct mlx5_aso_ct_pool *
12326 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12327                        struct mlx5_aso_ct_action **ct_free)
12328 {
12329         struct mlx5_priv *priv = dev->data->dev_private;
12330         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12331         struct mlx5_aso_ct_pool *pool = NULL;
12332         struct mlx5_devx_obj *obj = NULL;
12333         uint32_t i;
12334         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12335
12336         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
12337                                                 priv->sh->pdn, log_obj_size);
12338         if (!obj) {
12339                 rte_errno = ENODATA;
12340                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12341                 return NULL;
12342         }
12343         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12344         if (!pool) {
12345                 rte_errno = ENOMEM;
12346                 claim_zero(mlx5_devx_cmd_destroy(obj));
12347                 return NULL;
12348         }
12349         pool->devx_obj = obj;
12350         pool->index = mng->next;
12351         /* Resize pools array if there is no room for the new pool in it. */
12352         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12353                 claim_zero(mlx5_devx_cmd_destroy(obj));
12354                 mlx5_free(pool);
12355                 return NULL;
12356         }
12357         mng->pools[pool->index] = pool;
12358         mng->next++;
12359         /* Assign the first action in the new pool, the rest go to free list. */
12360         *ct_free = &pool->actions[0];
12361         /* The lock is held by the caller, so the list operation is safe here. */
12362         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12363                 /* refcnt is 0 when allocating the memory. */
12364                 pool->actions[i].offset = i;
12365                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12366         }
12367         return pool;
12368 }
12369
12370 /**
12371  * Allocate an ASO CT action from the free list.
12372  *
12373  * @param[in] dev
12374  *   Pointer to the Ethernet device structure.
12375  * @param[out] error
12376  *   Pointer to the error structure.
12377  *
12378  * @return
12379  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12380  */
12381 static uint32_t
12382 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12383 {
12384         struct mlx5_priv *priv = dev->data->dev_private;
12385         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12386         struct mlx5_aso_ct_action *ct = NULL;
12387         struct mlx5_aso_ct_pool *pool;
12388         int reg_c;
12389         uint32_t ct_idx;
12390
12391         MLX5_ASSERT(mng);
12392         if (!priv->config.devx) {
12393                 rte_errno = ENOTSUP;
12394                 return 0;
12395         }
12396         /* Get a free CT action; if none, a new pool will be created. */
12397         rte_spinlock_lock(&mng->ct_sl);
12398         ct = LIST_FIRST(&mng->free_cts);
12399         if (ct) {
12400                 LIST_REMOVE(ct, next);
12401         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12402                 rte_spinlock_unlock(&mng->ct_sl);
12403                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12404                                    NULL, "failed to create ASO CT pool");
12405                 return 0;
12406         }
12407         rte_spinlock_unlock(&mng->ct_sl);
12408         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12409         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12410         /* 0: inactive, 1: created, 2+: used by flows. */
12411         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12412         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
              if (reg_c < 0) {
                      flow_dv_aso_ct_dev_release(dev, ct_idx);
                      rte_flow_error_set(error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                         "failed to get reg_c for ASO conntrack");
                      return 0;
              }
12413         if (!ct->dr_action_orig) {
12414 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12415                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12416                         (priv->sh->rx_domain, pool->devx_obj->obj,
12417                          ct->offset,
12418                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12419                          reg_c - REG_C_0);
12420 #else
12421                 RTE_SET_USED(reg_c);
12422 #endif
12423                 if (!ct->dr_action_orig) {
12424                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12425                         rte_flow_error_set(error, rte_errno,
12426                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12427                                            "failed to create ASO CT action");
12428                         return 0;
12429                 }
12430         }
12431         if (!ct->dr_action_rply) {
12432 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12433                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12434                         (priv->sh->rx_domain, pool->devx_obj->obj,
12435                          ct->offset,
12436                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12437                          reg_c - REG_C_0);
12438 #endif
12439                 if (!ct->dr_action_rply) {
12440                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12441                         rte_flow_error_set(error, rte_errno,
12442                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12443                                            "failed to create ASO CT action");
12444                         return 0;
12445                 }
12446         }
12447         return ct_idx;
12448 }
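      /*
       * Both direction flavors of the DR action are created up front here;
       * the action actually attached to a flow is selected elsewhere
       * according to the connection's direction.
       */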
12449
12450 /**
12451  * Create a conntrack object with context and actions using the ASO mechanism.
12452  *
12453  * @param[in] dev
12454  *   Pointer to rte_eth_dev structure.
12455  * @param[in] pro
12456  *   Pointer to the conntrack information profile.
12457  * @param[out] error
12458  *   Pointer to the error structure.
12459  *
12460  * @return
12461  *   Index to conntrack object on success, 0 otherwise.
12462  */
12463 static uint32_t
12464 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12465                                    const struct rte_flow_action_conntrack *pro,
12466                                    struct rte_flow_error *error)
12467 {
12468         struct mlx5_priv *priv = dev->data->dev_private;
12469         struct mlx5_dev_ctx_shared *sh = priv->sh;
12470         struct mlx5_aso_ct_action *ct;
12471         uint32_t idx;
12472
12473         if (!sh->ct_aso_en)
12474                 return rte_flow_error_set(error, ENOTSUP,
12475                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12476                                           "Connection is not supported");
12477         idx = flow_dv_aso_ct_alloc(dev, error);
12478         if (!idx)
12479                 return rte_flow_error_set(error, rte_errno,
12480                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12481                                           "Failed to allocate CT object");
12482         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12483         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12484                 return rte_flow_error_set(error, EBUSY,
12485                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12486                                           "Failed to update CT");
12487         ct->is_original = !!pro->is_original_dir;
12488         ct->peer = pro->peer_port;
12489         return idx;
12490 }
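      /*
       * A minimal sketch of the profile this translation consumes; only the
       * two fields read above are shown and the example values are arbitrary,
       * with the rest of rte_flow_action_conntrack left default-initialized:
       *
       *      struct rte_flow_action_conntrack pro = {
       *              .peer_port = 1,
       *              .is_original_dir = 1,
       *      };
       */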
12491
12492 /**
12493  * Fill the flow with DV spec, lock free
12494  * (mutex should be acquired by caller).
12495  *
12496  * @param[in] dev
12497  *   Pointer to rte_eth_dev structure.
12498  * @param[in, out] dev_flow
12499  *   Pointer to the sub flow.
12500  * @param[in] attr
12501  *   Pointer to the flow attributes.
12502  * @param[in] items
12503  *   Pointer to the list of items.
12504  * @param[in] actions
12505  *   Pointer to the list of actions.
12506  * @param[out] error
12507  *   Pointer to the error structure.
12508  *
12509  * @return
12510  *   0 on success, a negative errno value otherwise and rte_errno is set.
12511  */
12512 static int
12513 flow_dv_translate(struct rte_eth_dev *dev,
12514                   struct mlx5_flow *dev_flow,
12515                   const struct rte_flow_attr *attr,
12516                   const struct rte_flow_item items[],
12517                   const struct rte_flow_action actions[],
12518                   struct rte_flow_error *error)
12519 {
12520         struct mlx5_priv *priv = dev->data->dev_private;
12521         struct mlx5_dev_config *dev_conf = &priv->config;
12522         struct rte_flow *flow = dev_flow->flow;
12523         struct mlx5_flow_handle *handle = dev_flow->handle;
12524         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12525         struct mlx5_flow_rss_desc *rss_desc;
12526         uint64_t item_flags = 0;
12527         uint64_t last_item = 0;
12528         uint64_t action_flags = 0;
12529         struct mlx5_flow_dv_matcher matcher = {
12530                 .mask = {
12531                         .size = sizeof(matcher.mask.buf),
12532                 },
12533         };
12534         int actions_n = 0;
12535         bool actions_end = false;
12536         union {
12537                 struct mlx5_flow_dv_modify_hdr_resource res;
12538                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12539                             sizeof(struct mlx5_modification_cmd) *
12540                             (MLX5_MAX_MODIFY_NUM + 1)];
12541         } mhdr_dummy;
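              /*
               * The union above reserves stack room for the modify header
               * resource plus up to MLX5_MAX_MODIFY_NUM + 1 modification
               * commands.
               */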
12542         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12543         const struct rte_flow_action_count *count = NULL;
12544         const struct rte_flow_action_age *non_shared_age = NULL;
12545         union flow_dv_attr flow_attr = { .attr = 0 };
12546         uint32_t tag_be;
12547         union mlx5_flow_tbl_key tbl_key;
12548         uint32_t modify_action_position = UINT32_MAX;
12549         void *match_mask = matcher.mask.buf;
12550         void *match_value = dev_flow->dv.value.buf;
12551         uint8_t next_protocol = 0xff;
12552         struct rte_vlan_hdr vlan = { 0 };
12553         struct mlx5_flow_dv_dest_array_resource mdest_res;
12554         struct mlx5_flow_dv_sample_resource sample_res;
12555         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12556         const struct rte_flow_action_sample *sample = NULL;
12557         struct mlx5_flow_sub_actions_list *sample_act;
12558         uint32_t sample_act_pos = UINT32_MAX;
12559         uint32_t age_act_pos = UINT32_MAX;
12560         uint32_t num_of_dest = 0;
12561         int tmp_actions_n = 0;
12562         uint32_t table;
12563         int ret = 0;
12564         const struct mlx5_flow_tunnel *tunnel = NULL;
12565         struct flow_grp_info grp_info = {
12566                 .external = !!dev_flow->external,
12567                 .transfer = !!attr->transfer,
12568                 .fdb_def_rule = !!priv->fdb_def_rule,
12569                 .skip_scale = dev_flow->skip_scale &
12570                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12571                 .std_tbl_fix = true,
12572         };
12573         const struct rte_flow_item *head_item = items;
12574
12575         if (!wks)
12576                 return rte_flow_error_set(error, ENOMEM,
12577                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12578                                           NULL,
12579                                           "failed to push flow workspace");
12580         rss_desc = &wks->rss_desc;
12581         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12582         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12583         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12584                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12585         /* Update the normal path action resource into the last index of the array. */
12586         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12587         if (is_tunnel_offload_active(dev)) {
12588                 if (dev_flow->tunnel) {
12589                         RTE_VERIFY(dev_flow->tof_type ==
12590                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12591                         tunnel = dev_flow->tunnel;
12592                 } else {
12593                         tunnel = mlx5_get_tof(items, actions,
12594                                               &dev_flow->tof_type);
12595                         dev_flow->tunnel = tunnel;
12596                 }
12597                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12598                                         (dev, attr, tunnel, dev_flow->tof_type);
12599         }
12602         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12603                                        &grp_info, error);
12604         if (ret)
12605                 return ret;
12606         dev_flow->dv.group = table;
12607         if (attr->transfer)
12608                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12609         /* The number of actions must be reset to 0 in case of a dirty stack. */
12610         mhdr_res->actions_num = 0;
12611         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12612                 /*
12613                  * Do not add a decap action if the match rule drops the
12614                  * packet: HW rejects rules combining decap & drop.
12615                  *
12616                  * If a tunnel match rule was inserted before the matching
12617                  * tunnel set rule, the flow table used in the match rule
12618                  * must be registered. The current implementation handles
12619                  * that in flow_dv_match_register() at the function end.
12620                  */
12621                 bool add_decap = true;
12622                 const struct rte_flow_action *ptr = actions;
12623
12624                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12625                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12626                                 add_decap = false;
12627                                 break;
12628                         }
12629                 }
12630                 if (add_decap) {
12631                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12632                                                            attr->transfer,
12633                                                            error))
12634                                 return -rte_errno;
12635                         dev_flow->dv.actions[actions_n++] =
12636                                         dev_flow->dv.encap_decap->action;
12637                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12638                 }
12639         }
12640         for (; !actions_end ; actions++) {
12641                 const struct rte_flow_action_queue *queue;
12642                 const struct rte_flow_action_rss *rss;
12643                 const struct rte_flow_action *action = actions;
12644                 const uint8_t *rss_key;
12645                 struct mlx5_flow_tbl_resource *tbl;
12646                 struct mlx5_aso_age_action *age_act;
12647                 struct mlx5_flow_counter *cnt_act;
12648                 uint32_t port_id = 0;
12649                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12650                 int action_type = actions->type;
12651                 const struct rte_flow_action *found_action = NULL;
12652                 uint32_t jump_group = 0;
12653                 uint32_t owner_idx;
12654                 struct mlx5_aso_ct_action *ct;
12655
12656                 if (!mlx5_flow_os_action_supported(action_type))
12657                         return rte_flow_error_set(error, ENOTSUP,
12658                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12659                                                   actions,
12660                                                   "action not supported");
12661                 switch (action_type) {
12662                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12663                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12664                         break;
12665                 case RTE_FLOW_ACTION_TYPE_VOID:
12666                         break;
12667                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12668                         if (flow_dv_translate_action_port_id(dev, action,
12669                                                              &port_id, error))
12670                                 return -rte_errno;
12671                         port_id_resource.port_id = port_id;
12672                         MLX5_ASSERT(!handle->rix_port_id_action);
12673                         if (flow_dv_port_id_action_resource_register
12674                             (dev, &port_id_resource, dev_flow, error))
12675                                 return -rte_errno;
12676                         dev_flow->dv.actions[actions_n++] =
12677                                         dev_flow->dv.port_id_action->action;
12678                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12679                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12680                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12681                         num_of_dest++;
12682                         break;
12683                 case RTE_FLOW_ACTION_TYPE_FLAG:
12684                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12685                         dev_flow->handle->mark = 1;
12686                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12687                                 struct rte_flow_action_mark mark = {
12688                                         .id = MLX5_FLOW_MARK_DEFAULT,
12689                                 };
12690
12691                                 if (flow_dv_convert_action_mark(dev, &mark,
12692                                                                 mhdr_res,
12693                                                                 error))
12694                                         return -rte_errno;
12695                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12696                                 break;
12697                         }
12698                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12699                         /*
12700                          * Only one FLAG or MARK is supported per device flow
12701                          * right now. So the pointer to the tag resource must be
12702                          * zero before the register process.
12703                          */
12704                         MLX5_ASSERT(!handle->dvh.rix_tag);
12705                         if (flow_dv_tag_resource_register(dev, tag_be,
12706                                                           dev_flow, error))
12707                                 return -rte_errno;
12708                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12709                         dev_flow->dv.actions[actions_n++] =
12710                                         dev_flow->dv.tag_resource->action;
12711                         break;
12712                 case RTE_FLOW_ACTION_TYPE_MARK:
12713                         action_flags |= MLX5_FLOW_ACTION_MARK;
12714                         dev_flow->handle->mark = 1;
12715                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12716                                 const struct rte_flow_action_mark *mark =
12717                                         (const struct rte_flow_action_mark *)
12718                                                 actions->conf;
12719
12720                                 if (flow_dv_convert_action_mark(dev, mark,
12721                                                                 mhdr_res,
12722                                                                 error))
12723                                         return -rte_errno;
12724                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12725                                 break;
12726                         }
12727                         /* Fall-through */
12728                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12729                         /* Legacy (non-extensive) MARK action. */
12730                         tag_be = mlx5_flow_mark_set
12731                               (((const struct rte_flow_action_mark *)
12732                                (actions->conf))->id);
12733                         MLX5_ASSERT(!handle->dvh.rix_tag);
12734                         if (flow_dv_tag_resource_register(dev, tag_be,
12735                                                           dev_flow, error))
12736                                 return -rte_errno;
12737                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12738                         dev_flow->dv.actions[actions_n++] =
12739                                         dev_flow->dv.tag_resource->action;
12740                         break;
12741                 case RTE_FLOW_ACTION_TYPE_SET_META:
12742                         if (flow_dv_convert_action_set_meta
12743                                 (dev, mhdr_res, attr,
12744                                  (const struct rte_flow_action_set_meta *)
12745                                   actions->conf, error))
12746                                 return -rte_errno;
12747                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12748                         break;
12749                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12750                         if (flow_dv_convert_action_set_tag
12751                                 (dev, mhdr_res,
12752                                  (const struct rte_flow_action_set_tag *)
12753                                   actions->conf, error))
12754                                 return -rte_errno;
12755                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12756                         break;
12757                 case RTE_FLOW_ACTION_TYPE_DROP:
12758                         action_flags |= MLX5_FLOW_ACTION_DROP;
12759                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12760                         break;
12761                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12762                         queue = actions->conf;
12763                         rss_desc->queue_num = 1;
12764                         rss_desc->queue[0] = queue->index;
12765                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12766                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12767                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12768                         num_of_dest++;
12769                         break;
12770                 case RTE_FLOW_ACTION_TYPE_RSS:
12771                         rss = actions->conf;
12772                         memcpy(rss_desc->queue, rss->queue,
12773                                rss->queue_num * sizeof(uint16_t));
12774                         rss_desc->queue_num = rss->queue_num;
12775                         /* NULL RSS key indicates default RSS key. */
12776                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12777                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12778                         /*
12779                          * rss->level and rss->types should be set in advance
12780                          * when expanding items for RSS.
12781                          */
12782                         action_flags |= MLX5_FLOW_ACTION_RSS;
12783                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12784                                 MLX5_FLOW_FATE_SHARED_RSS :
12785                                 MLX5_FLOW_FATE_QUEUE;
12786                         break;
12787                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12788                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12789                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12790                         __atomic_fetch_add(&age_act->refcnt, 1,
12791                                            __ATOMIC_RELAXED);
12792                         age_act_pos = actions_n++;
12793                         action_flags |= MLX5_FLOW_ACTION_AGE;
12794                         break;
12795                 case RTE_FLOW_ACTION_TYPE_AGE:
12796                         non_shared_age = action->conf;
12797                         age_act_pos = actions_n++;
12798                         action_flags |= MLX5_FLOW_ACTION_AGE;
12799                         break;
12800                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12801                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12802                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12803                                                              NULL);
12804                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12805                                            __ATOMIC_RELAXED);
12806                         /* Save the information first; it will be applied later. */
12807                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12808                         break;
12809                 case RTE_FLOW_ACTION_TYPE_COUNT:
12810                         if (!dev_conf->devx) {
12811                                 return rte_flow_error_set
12812                                               (error, ENOTSUP,
12813                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12814                                                NULL,
12815                                                "count action not supported");
12816                         }
12817                         /* Save the information first; it will be applied later. */
12818                         count = action->conf;
12819                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12820                         break;
12821                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12822                         dev_flow->dv.actions[actions_n++] =
12823                                                 priv->sh->pop_vlan_action;
12824                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12825                         break;
12826                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12827                         if (!(action_flags &
12828                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12829                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12830                         vlan.eth_proto = rte_be_to_cpu_16
12831                              ((((const struct rte_flow_action_of_push_vlan *)
12832                                                    actions->conf)->ethertype));
12833                         found_action = mlx5_flow_find_action
12834                                         (actions + 1,
12835                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12836                         if (found_action)
12837                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12838                         found_action = mlx5_flow_find_action
12839                                         (actions + 1,
12840                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12841                         if (found_action)
12842                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12843                         if (flow_dv_create_action_push_vlan
12844                                             (dev, attr, &vlan, dev_flow, error))
12845                                 return -rte_errno;
12846                         dev_flow->dv.actions[actions_n++] =
12847                                         dev_flow->dv.push_vlan_res->action;
12848                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12849                         break;
12850                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12851                         /* The OF_PUSH_VLAN action already handled this action. */
12852                         MLX5_ASSERT(action_flags &
12853                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12854                         break;
12855                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12856                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12857                                 break;
12858                         flow_dev_get_vlan_info_from_items(items, &vlan);
12859                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12860                         /* If there is no VLAN push, this is a modify header action. */
12861                         if (flow_dv_convert_action_modify_vlan_vid
12862                                                 (mhdr_res, actions, error))
12863                                 return -rte_errno;
12864                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12865                         break;
12866                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12867                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12868                         if (flow_dv_create_action_l2_encap(dev, actions,
12869                                                            dev_flow,
12870                                                            attr->transfer,
12871                                                            error))
12872                                 return -rte_errno;
12873                         dev_flow->dv.actions[actions_n++] =
12874                                         dev_flow->dv.encap_decap->action;
12875                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12876                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12877                                 sample_act->action_flags |=
12878                                                         MLX5_FLOW_ACTION_ENCAP;
12879                         break;
12880                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12881                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12882                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12883                                                            attr->transfer,
12884                                                            error))
12885                                 return -rte_errno;
12886                         dev_flow->dv.actions[actions_n++] =
12887                                         dev_flow->dv.encap_decap->action;
12888                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12889                         break;
12890                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12891                         /* Handle encap with preceding decap. */
12892                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12893                                 if (flow_dv_create_action_raw_encap
12894                                         (dev, actions, dev_flow, attr, error))
12895                                         return -rte_errno;
12896                                 dev_flow->dv.actions[actions_n++] =
12897                                         dev_flow->dv.encap_decap->action;
12898                         } else {
12899                                 /* Handle encap without preceding decap. */
12900                                 if (flow_dv_create_action_l2_encap
12901                                     (dev, actions, dev_flow, attr->transfer,
12902                                      error))
12903                                         return -rte_errno;
12904                                 dev_flow->dv.actions[actions_n++] =
12905                                         dev_flow->dv.encap_decap->action;
12906                         }
12907                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12908                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12909                                 sample_act->action_flags |=
12910                                                         MLX5_FLOW_ACTION_ENCAP;
12911                         break;
12912                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
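                              /*
                               * Peek past any VOID actions: a decap that is
                               * immediately followed by an encap is handled
                               * by the encap branch.
                               */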
12913                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12914                                 ;
12915                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12916                                 if (flow_dv_create_action_l2_decap
12917                                     (dev, dev_flow, attr->transfer, error))
12918                                         return -rte_errno;
12919                                 dev_flow->dv.actions[actions_n++] =
12920                                         dev_flow->dv.encap_decap->action;
12921                         }
12922                         /* If decap is followed by encap, handle it at encap. */
12923                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12924                         break;
12925                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12926                         dev_flow->dv.actions[actions_n++] =
12927                                 (void *)(uintptr_t)action->conf;
12928                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12929                         break;
12930                 case RTE_FLOW_ACTION_TYPE_JUMP:
12931                         jump_group = ((const struct rte_flow_action_jump *)
12932                                                         action->conf)->group;
12933                         grp_info.std_tbl_fix = 0;
12934                         if (dev_flow->skip_scale &
12935                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12936                                 grp_info.skip_scale = 1;
12937                         else
12938                                 grp_info.skip_scale = 0;
12939                         ret = mlx5_flow_group_to_table(dev, tunnel,
12940                                                        jump_group,
12941                                                        &table,
12942                                                        &grp_info, error);
12943                         if (ret)
12944                                 return ret;
12945                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12946                                                        attr->transfer,
12947                                                        !!dev_flow->external,
12948                                                        tunnel, jump_group, 0,
12949                                                        0, error);
12950                         if (!tbl)
12951                                 return rte_flow_error_set
12952                                                 (error, errno,
12953                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12954                                                  NULL,
12955                                                  "cannot create jump action.");
12956                         if (flow_dv_jump_tbl_resource_register
12957                             (dev, tbl, dev_flow, error)) {
12958                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12959                                 return rte_flow_error_set
12960                                                 (error, errno,
12961                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12962                                                  NULL,
12963                                                  "cannot create jump action.");
12964                         }
12965                         dev_flow->dv.actions[actions_n++] =
12966                                         dev_flow->dv.jump->action;
12967                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12968                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12969                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12970                         num_of_dest++;
12971                         break;
12972                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12973                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12974                         if (flow_dv_convert_action_modify_mac
12975                                         (mhdr_res, actions, error))
12976                                 return -rte_errno;
12977                         action_flags |= actions->type ==
12978                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12979                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12980                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12981                         break;
12982                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12983                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12984                         if (flow_dv_convert_action_modify_ipv4
12985                                         (mhdr_res, actions, error))
12986                                 return -rte_errno;
12987                         action_flags |= actions->type ==
12988                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12989                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12990                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12991                         break;
12992                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12993                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12994                         if (flow_dv_convert_action_modify_ipv6
12995                                         (mhdr_res, actions, error))
12996                                 return -rte_errno;
12997                         action_flags |= actions->type ==
12998                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12999                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
13000                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
13001                         break;
13002                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
13003                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
13004                         if (flow_dv_convert_action_modify_tp
13005                                         (mhdr_res, actions, items,
13006                                          &flow_attr, dev_flow, !!(action_flags &
13007                                          MLX5_FLOW_ACTION_DECAP), error))
13008                                 return -rte_errno;
13009                         action_flags |= actions->type ==
13010                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
13011                                         MLX5_FLOW_ACTION_SET_TP_SRC :
13012                                         MLX5_FLOW_ACTION_SET_TP_DST;
13013                         break;
13014                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13015                         if (flow_dv_convert_action_modify_dec_ttl
13016                                         (mhdr_res, items, &flow_attr, dev_flow,
13017                                          !!(action_flags &
13018                                          MLX5_FLOW_ACTION_DECAP), error))
13019                                 return -rte_errno;
13020                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13021                         break;
13022                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
13023                         if (flow_dv_convert_action_modify_ttl
13024                                         (mhdr_res, actions, items, &flow_attr,
13025                                          dev_flow, !!(action_flags &
13026                                          MLX5_FLOW_ACTION_DECAP), error))
13027                                 return -rte_errno;
13028                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13029                         break;
13030                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13031                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13032                         if (flow_dv_convert_action_modify_tcp_seq
13033                                         (mhdr_res, actions, error))
13034                                 return -rte_errno;
13035                         action_flags |= actions->type ==
13036                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13037                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
13038                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13039                         break;
13040
13041                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13042                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13043                         if (flow_dv_convert_action_modify_tcp_ack
13044                                         (mhdr_res, actions, error))
13045                                 return -rte_errno;
13046                         action_flags |= actions->type ==
13047                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13048                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
13049                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
13050                         break;
13051                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13052                         if (flow_dv_convert_action_set_reg
13053                                         (mhdr_res, actions, error))
13054                                 return -rte_errno;
13055                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13056                         break;
13057                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13058                         if (flow_dv_convert_action_copy_mreg
13059                                         (dev, mhdr_res, actions, error))
13060                                 return -rte_errno;
13061                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13062                         break;
13063                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13064                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13065                         dev_flow->handle->fate_action =
13066                                         MLX5_FLOW_FATE_DEFAULT_MISS;
13067                         break;
13068                 case RTE_FLOW_ACTION_TYPE_METER:
13069                         if (!wks->fm)
13070                                 return rte_flow_error_set(error, rte_errno,
13071                                         RTE_FLOW_ERROR_TYPE_ACTION,
13072                                         NULL, "Failed to get meter in flow.");
13073                         /* Set the meter action. */
13074                         dev_flow->dv.actions[actions_n++] =
13075                                 wks->fm->meter_action;
13076                         action_flags |= MLX5_FLOW_ACTION_METER;
13077                         break;
13078                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13079                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13080                                                               actions, error))
13081                                 return -rte_errno;
13082                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13083                         break;
13084                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13085                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13086                                                               actions, error))
13087                                 return -rte_errno;
13088                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13089                         break;
13090                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13091                         sample_act_pos = actions_n;
13092                         sample = (const struct rte_flow_action_sample *)
13093                                  action->conf;
13094                         actions_n++;
13095                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13096                         /* Put the encap action into the group when combined with port id. */
13097                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13098                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13099                                 sample_act->action_flags |=
13100                                                         MLX5_FLOW_ACTION_ENCAP;
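                              /*
                               * Note (illustrative): this case only reserves
                               * dv.actions[sample_act_pos]; the slot is
                               * filled after the item loop, once the RSS
                               * hash fields are final (see the sample
                               * handling past that loop).
                               */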
13101                         break;
13102                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13103                         if (flow_dv_convert_action_modify_field
13104                                         (dev, mhdr_res, actions, attr, error))
13105                                 return -rte_errno;
13106                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13107                         break;
13108                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13109                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13110                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13111                         if (!ct)
13112                                 return rte_flow_error_set(error, EINVAL,
13113                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13114                                                 NULL,
13115                                                 "Failed to get CT object.");
13116                         if (mlx5_aso_ct_available(priv->sh, ct))
13117                                 return rte_flow_error_set(error, rte_errno,
13118                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13119                                                 NULL,
13120                                                 "CT is unavailable.");
13121                         if (ct->is_original)
13122                                 dev_flow->dv.actions[actions_n] =
13123                                                         ct->dr_action_orig;
13124                         else
13125                                 dev_flow->dv.actions[actions_n] =
13126                                                         ct->dr_action_rply;
13127                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
13128                         flow->ct = owner_idx;
13129                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
13130                         actions_n++;
13131                         action_flags |= MLX5_FLOW_ACTION_CT;
13132                         break;
13133                 case RTE_FLOW_ACTION_TYPE_END:
13134                         actions_end = true;
13135                         if (mhdr_res->actions_num) {
13136                                 /* Create the modify header action if needed. */
13137                                 if (flow_dv_modify_hdr_resource_register
13138                                         (dev, mhdr_res, dev_flow, error))
13139                                         return -rte_errno;
13140                                 dev_flow->dv.actions[modify_action_position] =
13141                                         handle->dvh.modify_hdr->action;
13142                         }
13143                         /*
13144                          * Handle the AGE and COUNT actions with a single
13145                          * HW counter when they are not shared.
13146                          */
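                              /*
                               * Decision sketch (as read from the condition
                               * below): fall back to a counter-based AGE
                               * either when a non-shared COUNT is present
                               * anyway, or when ASO flow-hit aging cannot be
                               * used (flow_hit_aso_en unset, or a root-table
                               * rule: group 0 without transfer).
                               */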
13147                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
13148                                 if ((non_shared_age &&
13149                                      count && !count->shared) ||
13150                                     !(priv->sh->flow_hit_aso_en &&
13151                                       (attr->group || attr->transfer))) {
13152                                         /* Create the AGE action by counters. */
13153                                         cnt_act = flow_dv_prepare_counter
13154                                                                 (dev, dev_flow,
13155                                                                  flow, count,
13156                                                                  non_shared_age,
13157                                                                  error);
13158                                         if (!cnt_act)
13159                                                 return -rte_errno;
13160                                         dev_flow->dv.actions[age_act_pos] =
13161                                                                 cnt_act->action;
13162                                         break;
13163                                 }
13164                                 if (!flow->age && non_shared_age) {
13165                                         flow->age = flow_dv_aso_age_alloc
13166                                                                 (dev, error);
13167                                         if (!flow->age)
13168                                                 return -rte_errno;
13169                                         flow_dv_aso_age_params_init
13170                                                     (dev, flow->age,
13171                                                      non_shared_age->context ?
13172                                                      non_shared_age->context :
13173                                                      (void *)(uintptr_t)
13174                                                      (dev_flow->flow_idx),
13175                                                      non_shared_age->timeout);
13176                                 }
13177                                 age_act = flow_aso_age_get_by_idx(dev,
13178                                                                   flow->age);
13179                                 dev_flow->dv.actions[age_act_pos] =
13180                                                              age_act->dr_action;
13181                         }
13182                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13183                                 /*
13184                                  * Create one count action, to be used
13185                                  * by all sub-flows.
13186                                  */
13187                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13188                                                                   flow, count,
13189                                                                   NULL, error);
13190                                 if (!cnt_act)
13191                                         return -rte_errno;
13192                                 dev_flow->dv.actions[actions_n++] =
13193                                                                 cnt_act->action;
13194                         }
13195                 default:
13196                         break;
13197                 }
13198                 if (mhdr_res->actions_num &&
13199                     modify_action_position == UINT32_MAX)
13200                         modify_action_position = actions_n++;
13201         }
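              /*
               * Note on the action loop above (illustrative, an assumption
               * about reading order rather than new behavior): all header
               * rewrites accumulated in mhdr_res are emitted as one
               * modify-header action in the slot reserved by
               * modify_action_position. E.g. for SET_IPV4_SRC, SET_TTL and
               * JUMP the dv.actions[] array ends up as
               *
               *     { modify_hdr(set_ipv4_src + set_ttl), jump }
               */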
13202         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13203                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13204                 int item_type = items->type;
13205
13206                 if (!mlx5_flow_os_item_supported(item_type))
13207                         return rte_flow_error_set(error, ENOTSUP,
13208                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13209                                                   NULL, "item not supported");
13210                 switch (item_type) {
13211                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13212                         flow_dv_translate_item_port_id
13213                                 (dev, match_mask, match_value, items, attr);
13214                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13215                         break;
13216                 case RTE_FLOW_ITEM_TYPE_ETH:
13217                         flow_dv_translate_item_eth(match_mask, match_value,
13218                                                    items, tunnel,
13219                                                    dev_flow->dv.group);
13220                         matcher.priority = action_flags &
13221                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13222                                         !dev_flow->external ?
13223                                         MLX5_PRIORITY_MAP_L3 :
13224                                         MLX5_PRIORITY_MAP_L2;
13225                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13226                                              MLX5_FLOW_LAYER_OUTER_L2;
13227                         break;
13228                 case RTE_FLOW_ITEM_TYPE_VLAN:
13229                         flow_dv_translate_item_vlan(dev_flow,
13230                                                     match_mask, match_value,
13231                                                     items, tunnel,
13232                                                     dev_flow->dv.group);
13233                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13234                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13235                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13236                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13237                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13238                         break;
13239                 case RTE_FLOW_ITEM_TYPE_IPV4:
13240                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13241                                                   &item_flags, &tunnel);
13242                         flow_dv_translate_item_ipv4(match_mask, match_value,
13243                                                     items, tunnel,
13244                                                     dev_flow->dv.group);
13245                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13246                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13247                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13248                         if (items->mask != NULL &&
13249                             ((const struct rte_flow_item_ipv4 *)
13250                              items->mask)->hdr.next_proto_id) {
13251                                 next_protocol =
13252                                         ((const struct rte_flow_item_ipv4 *)
13253                                          (items->spec))->hdr.next_proto_id;
13254                                 next_protocol &=
13255                                         ((const struct rte_flow_item_ipv4 *)
13256                                          (items->mask))->hdr.next_proto_id;
13257                         } else {
13258                                 /* Reset for inner layer. */
13259                                 next_protocol = 0xff;
13260                         }
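                              /*
                               * Worked example (illustrative): with
                               * spec.next_proto_id = 17 (UDP) and mask 0xff,
                               * next_protocol becomes 17 and guides tunnel
                               * detection for the following items; with a
                               * NULL or zero mask it is reset to 0xff, i.e.
                               * not constrained. The same pattern repeats for
                               * IPv6 and the fragment extension header below.
                               */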
13261                         break;
13262                 case RTE_FLOW_ITEM_TYPE_IPV6:
13263                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13264                                                   &item_flags, &tunnel);
13265                         flow_dv_translate_item_ipv6(match_mask, match_value,
13266                                                     items, tunnel,
13267                                                     dev_flow->dv.group);
13268                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13269                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13270                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13271                         if (items->mask != NULL &&
13272                             ((const struct rte_flow_item_ipv6 *)
13273                              items->mask)->hdr.proto) {
13274                                 next_protocol =
13275                                         ((const struct rte_flow_item_ipv6 *)
13276                                          items->spec)->hdr.proto;
13277                                 next_protocol &=
13278                                         ((const struct rte_flow_item_ipv6 *)
13279                                          items->mask)->hdr.proto;
13280                         } else {
13281                                 /* Reset for inner layer. */
13282                                 next_protocol = 0xff;
13283                         }
13284                         break;
13285                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13286                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13287                                                              match_value,
13288                                                              items, tunnel);
13289                         last_item = tunnel ?
13290                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13291                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13292                         if (items->mask != NULL &&
13293                             ((const struct rte_flow_item_ipv6_frag_ext *)
13294                              items->mask)->hdr.next_header) {
13295                                 next_protocol =
13296                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13297                                  items->spec)->hdr.next_header;
13298                                 next_protocol &=
13299                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13300                                  items->mask)->hdr.next_header;
13301                         } else {
13302                                 /* Reset for inner layer. */
13303                                 next_protocol = 0xff;
13304                         }
13305                         break;
13306                 case RTE_FLOW_ITEM_TYPE_TCP:
13307                         flow_dv_translate_item_tcp(match_mask, match_value,
13308                                                    items, tunnel);
13309                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13310                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13311                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13312                         break;
13313                 case RTE_FLOW_ITEM_TYPE_UDP:
13314                         flow_dv_translate_item_udp(match_mask, match_value,
13315                                                    items, tunnel);
13316                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13317                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13318                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13319                         break;
13320                 case RTE_FLOW_ITEM_TYPE_GRE:
13321                         flow_dv_translate_item_gre(match_mask, match_value,
13322                                                    items, tunnel);
13323                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13324                         last_item = MLX5_FLOW_LAYER_GRE;
13325                         break;
13326                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13327                         flow_dv_translate_item_gre_key(match_mask,
13328                                                        match_value, items);
13329                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13330                         break;
13331                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13332                         flow_dv_translate_item_nvgre(match_mask, match_value,
13333                                                      items, tunnel);
13334                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13335                         last_item = MLX5_FLOW_LAYER_GRE;
13336                         break;
13337                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13338                         flow_dv_translate_item_vxlan(dev, attr,
13339                                                      match_mask, match_value,
13340                                                      items, tunnel);
13341                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13342                         last_item = MLX5_FLOW_LAYER_VXLAN;
13343                         break;
13344                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13345                         flow_dv_translate_item_vxlan_gpe(match_mask,
13346                                                          match_value, items,
13347                                                          tunnel);
13348                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13349                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13350                         break;
13351                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13352                         flow_dv_translate_item_geneve(match_mask, match_value,
13353                                                       items, tunnel);
13354                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13355                         last_item = MLX5_FLOW_LAYER_GENEVE;
13356                         break;
13357                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13358                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13359                                                           match_value,
13360                                                           items, error);
13361                         if (ret)
13362                                 return rte_flow_error_set(error, -ret,
13363                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13364                                         "cannot create GENEVE TLV option");
13365                         flow->geneve_tlv_option = 1;
13366                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13367                         break;
13368                 case RTE_FLOW_ITEM_TYPE_MPLS:
13369                         flow_dv_translate_item_mpls(match_mask, match_value,
13370                                                     items, last_item, tunnel);
13371                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13372                         last_item = MLX5_FLOW_LAYER_MPLS;
13373                         break;
13374                 case RTE_FLOW_ITEM_TYPE_MARK:
13375                         flow_dv_translate_item_mark(dev, match_mask,
13376                                                     match_value, items);
13377                         last_item = MLX5_FLOW_ITEM_MARK;
13378                         break;
13379                 case RTE_FLOW_ITEM_TYPE_META:
13380                         flow_dv_translate_item_meta(dev, match_mask,
13381                                                     match_value, attr, items);
13382                         last_item = MLX5_FLOW_ITEM_METADATA;
13383                         break;
13384                 case RTE_FLOW_ITEM_TYPE_ICMP:
13385                         flow_dv_translate_item_icmp(match_mask, match_value,
13386                                                     items, tunnel);
13387                         last_item = MLX5_FLOW_LAYER_ICMP;
13388                         break;
13389                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13390                         flow_dv_translate_item_icmp6(match_mask, match_value,
13391                                                       items, tunnel);
13392                         last_item = MLX5_FLOW_LAYER_ICMP6;
13393                         break;
13394                 case RTE_FLOW_ITEM_TYPE_TAG:
13395                         flow_dv_translate_item_tag(dev, match_mask,
13396                                                    match_value, items);
13397                         last_item = MLX5_FLOW_ITEM_TAG;
13398                         break;
13399                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13400                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13401                                                         match_value, items);
13402                         last_item = MLX5_FLOW_ITEM_TAG;
13403                         break;
13404                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13405                         flow_dv_translate_item_tx_queue(dev, match_mask,
13406                                                         match_value,
13407                                                         items);
13408                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13409                         break;
13410                 case RTE_FLOW_ITEM_TYPE_GTP:
13411                         flow_dv_translate_item_gtp(match_mask, match_value,
13412                                                    items, tunnel);
13413                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13414                         last_item = MLX5_FLOW_LAYER_GTP;
13415                         break;
13416                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13417                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13418                                                           match_value,
13419                                                           items);
13420                         if (ret)
13421                                 return rte_flow_error_set(error, -ret,
13422                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13423                                         "cannot create GTP PSC item");
13424                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13425                         break;
13426                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13427                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13428                                 /* Create it only on first use. */
13429                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13430                                 if (ret)
13431                                         return rte_flow_error_set
13432                                                 (error, -ret,
13433                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13434                                                 NULL,
13435                                                 "cannot create eCPRI parser");
13436                         }
13437                         flow_dv_translate_item_ecpri(dev, match_mask,
13438                                                      match_value, items);
13439                         /* No other protocol should follow the eCPRI layer. */
13440                         last_item = MLX5_FLOW_LAYER_ECPRI;
13441                         break;
13442                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13443                         flow_dv_translate_item_integrity(match_mask,
13444                                                          match_value,
13445                                                          head_item, items);
13446                         break;
13447                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13448                         flow_dv_translate_item_aso_ct(dev, match_mask,
13449                                                       match_value, items);
13450                         break;
13451                 default:
13452                         break;
13453                 }
13454                 item_flags |= last_item;
13455         }
13456         /*
13457          * When E-Switch mode is enabled, there are two cases where the
13458          * source port must be set manually: a NIC steering rule, and an
13459          * E-Switch rule in which no port_id item was found. In both
13460          * cases the source port is set according to the current port in
13461          * use.
13462          */
13463         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13464             (priv->representor || priv->master)) {
13465                 if (flow_dv_translate_item_port_id(dev, match_mask,
13466                                                    match_value, NULL, attr))
13467                         return -rte_errno;
13468         }
13469 #ifdef RTE_LIBRTE_MLX5_DEBUG
13470         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13471                                               dev_flow->dv.value.buf));
13472 #endif
13473         /*
13474          * Layers may be already initialized from prefix flow if this dev_flow
13475          * is the suffix flow.
13476          */
13477         handle->layers |= item_flags;
13478         if (action_flags & MLX5_FLOW_ACTION_RSS)
13479                 flow_dv_hashfields_set(dev_flow, rss_desc);
13480         /* If the sample action contains an RSS action, the Sample/Mirror
13481          * resource should be registered after the hash fields are updated.
13482          */
13483         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13484                 ret = flow_dv_translate_action_sample(dev,
13485                                                       sample,
13486                                                       dev_flow, attr,
13487                                                       &num_of_dest,
13488                                                       sample_actions,
13489                                                       &sample_res,
13490                                                       error);
13491                 if (ret < 0)
13492                         return ret;
13493                 ret = flow_dv_create_action_sample(dev,
13494                                                    dev_flow,
13495                                                    num_of_dest,
13496                                                    &sample_res,
13497                                                    &mdest_res,
13498                                                    sample_actions,
13499                                                    action_flags,
13500                                                    error);
13501                 if (ret < 0)
13502                         return rte_flow_error_set
13503                                                 (error, rte_errno,
13504                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13505                                                 NULL,
13506                                                 "cannot create sample action");
13507                 if (num_of_dest > 1) {
13508                         dev_flow->dv.actions[sample_act_pos] =
13509                         dev_flow->dv.dest_array_res->action;
13510                 } else {
13511                         dev_flow->dv.actions[sample_act_pos] =
13512                         dev_flow->dv.sample_res->verbs_action;
13513                 }
13514         }
13515         /*
13516          * For multiple destinations (sample action with ratio=1), the
13517          * encap action and the port id action are combined into a group
13518          * action, so the original actions must be removed from the flow
13519          * and only the sample action used instead.
13520          */
13521         if (num_of_dest > 1 &&
13522             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13523                 int i;
13524                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13525
13526                 for (i = 0; i < actions_n; i++) {
13527                         if ((sample_act->dr_encap_action &&
13528                                 sample_act->dr_encap_action ==
13529                                 dev_flow->dv.actions[i]) ||
13530                                 (sample_act->dr_port_id_action &&
13531                                 sample_act->dr_port_id_action ==
13532                                 dev_flow->dv.actions[i]) ||
13533                                 (sample_act->dr_jump_action &&
13534                                 sample_act->dr_jump_action ==
13535                                 dev_flow->dv.actions[i]))
13536                                 continue;
13537                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13538                 }
13539                 memcpy((void *)dev_flow->dv.actions,
13540                                 (void *)temp_actions,
13541                                 tmp_actions_n * sizeof(void *));
13542                 actions_n = tmp_actions_n;
13543         }
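              /*
               * Compaction example for the block above (hypothetical): with
               * dv.actions[] holding { port_id, encap, sample } and
               * num_of_dest > 1, port_id and encap are owned by the sample
               * destination group, so the array is rewritten to { sample }
               * and actions_n becomes 1.
               */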
13544         dev_flow->dv.actions_n = actions_n;
13545         dev_flow->act_flags = action_flags;
13546         if (wks->skip_matcher_reg)
13547                 return 0;
13548         /* Register matcher. */
13549         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13550                                     matcher.mask.size);
13551         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13552                                         matcher.priority);
13553         /*
13554          * When creating a meter drop flow in the drop table with the
13555          * original 5-tuple match, the matcher priority should be lower
13556          * than the mtr_id matcher's.
13557          */
13558         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13559             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13560             matcher.priority <= MLX5_REG_BITS)
13561                 matcher.priority += MLX5_REG_BITS;
13562         /* The reserved field does not need to be set to 0 here. */
13563         tbl_key.is_fdb = attr->transfer;
13564         tbl_key.is_egress = attr->egress;
13565         tbl_key.level = dev_flow->dv.group;
13566         tbl_key.id = dev_flow->dv.table_id;
13567         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13568                                      tunnel, attr->group, error))
13569                 return -rte_errno;
13570         return 0;
13571 }
13572
13573 /**
13574  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13575  * in a shared RSS action. Each supported hash-field combination
13576  * maps to a fixed slot in the action's hrxq array.
13577  *
13578  * @param[in, out] action
13579  *   Shared RSS action holding hash RX queue objects.
13580  * @param[in] hash_fields
13581  *   Defines combination of packet fields to participate in RX hash.
13582  *   The IBV_RX_HASH_INNER flag is masked off before the slot is selected.
13583  * @param[in] hrxq_idx
13584  *   Hash RX queue index to set.
13585  *
13586  * @return
13587  *   0 on success, otherwise negative errno value.
13588  */
13589 static int
13590 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13591                               const uint64_t hash_fields,
13592                               uint32_t hrxq_idx)
13593 {
13594         uint32_t *hrxqs = action->hrxq;
13595
13596         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13597         case MLX5_RSS_HASH_IPV4:
13598                 /* fall-through. */
13599         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13600                 /* fall-through. */
13601         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13602                 hrxqs[0] = hrxq_idx;
13603                 return 0;
13604         case MLX5_RSS_HASH_IPV4_TCP:
13605                 /* fall-through. */
13606         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13607                 /* fall-through. */
13608         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13609                 hrxqs[1] = hrxq_idx;
13610                 return 0;
13611         case MLX5_RSS_HASH_IPV4_UDP:
13612                 /* fall-through. */
13613         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13614                 /* fall-through. */
13615         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13616                 hrxqs[2] = hrxq_idx;
13617                 return 0;
13618         case MLX5_RSS_HASH_IPV6:
13619                 /* fall-through. */
13620         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13621                 /* fall-through. */
13622         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13623                 hrxqs[3] = hrxq_idx;
13624                 return 0;
13625         case MLX5_RSS_HASH_IPV6_TCP:
13626                 /* fall-through. */
13627         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13628                 /* fall-through. */
13629         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13630                 hrxqs[4] = hrxq_idx;
13631                 return 0;
13632         case MLX5_RSS_HASH_IPV6_UDP:
13633                 /* fall-through. */
13634         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13635                 /* fall-through. */
13636         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13637                 hrxqs[5] = hrxq_idx;
13638                 return 0;
13639         case MLX5_RSS_HASH_NONE:
13640                 hrxqs[6] = hrxq_idx;
13641                 return 0;
13642         default:
13643                 return -1;
13644         }
13645 }
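      /*
       * Slot layout used by the switch above (derived from the code):
       * hrxq[0] = IPv4, hrxq[1] = IPv4/TCP, hrxq[2] = IPv4/UDP,
       * hrxq[3] = IPv6, hrxq[4] = IPv6/TCP, hrxq[5] = IPv6/UDP,
       * hrxq[6] = MLX5_RSS_HASH_NONE. The SRC_ONLY/DST_ONLY variants
       * share the slot of their base combination.
       */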
13646
13647 /**
13648  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13649  * in a shared RSS action.
13650  *
13651  * @param[in] dev
13652  *   Pointer to the Ethernet device structure.
13653  * @param[in] idx
13654  *   Shared RSS action ID holding hash RX queue objects.
13655  * @param[in] hash_fields
13656  *   Defines combination of packet fields to participate in RX hash.
13657  *   The IBV_RX_HASH_INNER flag is masked off before the slot is
13658  *   selected.
13659  *
13660  * @return
13661  *   Valid hash RX queue index, otherwise 0.
13662  */
13663 static uint32_t
13664 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13665                                  const uint64_t hash_fields)
13666 {
13667         struct mlx5_priv *priv = dev->data->dev_private;
13668         struct mlx5_shared_action_rss *shared_rss =
13669             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13670         const uint32_t *hrxqs = shared_rss->hrxq;
13671
13672         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13673         case MLX5_RSS_HASH_IPV4:
13674                 /* fall-through. */
13675         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13676                 /* fall-through. */
13677         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13678                 return hrxqs[0];
13679         case MLX5_RSS_HASH_IPV4_TCP:
13680                 /* fall-through. */
13681         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13682                 /* fall-through. */
13683         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13684                 return hrxqs[1];
13685         case MLX5_RSS_HASH_IPV4_UDP:
13686                 /* fall-through. */
13687         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13688                 /* fall-through. */
13689         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13690                 return hrxqs[2];
13691         case MLX5_RSS_HASH_IPV6:
13692                 /* fall-through. */
13693         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13694                 /* fall-through. */
13695         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13696                 return hrxqs[3];
13697         case MLX5_RSS_HASH_IPV6_TCP:
13698                 /* fall-through. */
13699         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13700                 /* fall-through. */
13701         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13702                 return hrxqs[4];
13703         case MLX5_RSS_HASH_IPV6_UDP:
13704                 /* fall-through. */
13705         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13706                 /* fall-through. */
13707         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13708                 return hrxqs[5];
13709         case MLX5_RSS_HASH_NONE:
13710                 return hrxqs[6];
13711         default:
13712                 return 0;
13713         }
13714
13715 }
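      /*
       * Minimal paired-usage sketch for the two helpers above (hypothetical
       * values; assumes idx resolves to the same shared RSS action):
       *
       *     __flow_dv_action_rss_hrxq_set(shared_rss, MLX5_RSS_HASH_IPV4, 7);
       *     __flow_dv_action_rss_hrxq_lookup(dev, idx,
       *                                      MLX5_RSS_HASH_IPV4_SRC_ONLY);
       *
       * The lookup returns 7, since both hash-field combinations map to
       * hrxq[0].
       */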
13716
13717 /**
13718  * Apply the flow to the NIC, lock free
13719  * (the mutex should be acquired by the caller).
13720  *
13721  * @param[in] dev
13722  *   Pointer to the Ethernet device structure.
13723  * @param[in, out] flow
13724  *   Pointer to flow structure.
13725  * @param[out] error
13726  *   Pointer to error structure.
13727  *
13728  * @return
13729  *   0 on success, a negative errno value otherwise and rte_errno is set.
13730  */
13731 static int
13732 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13733               struct rte_flow_error *error)
13734 {
13735         struct mlx5_flow_dv_workspace *dv;
13736         struct mlx5_flow_handle *dh;
13737         struct mlx5_flow_handle_dv *dv_h;
13738         struct mlx5_flow *dev_flow;
13739         struct mlx5_priv *priv = dev->data->dev_private;
13740         uint32_t handle_idx;
13741         int n;
13742         int err;
13743         int idx;
13744         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13745         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13746         uint8_t misc_mask;
13747
13748         MLX5_ASSERT(wks);
13749         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13750                 dev_flow = &wks->flows[idx];
13751                 dv = &dev_flow->dv;
13752                 dh = dev_flow->handle;
13753                 dv_h = &dh->dvh;
13754                 n = dv->actions_n;
13755                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13756                         if (dv->transfer) {
13757                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13758                                 dv->actions[n++] = priv->sh->dr_drop_action;
13759                         } else {
13760 #ifdef HAVE_MLX5DV_DR
13761                                 /* DR supports drop action placeholder. */
13762                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13763                                 dv->actions[n++] = priv->sh->dr_drop_action;
13764 #else
13765                                 /* For DV we use the explicit drop queue. */
13766                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13767                                 dv->actions[n++] =
13768                                                 priv->drop_queue.hrxq->action;
13769 #endif
13770                         }
13771                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13772                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13773                         struct mlx5_hrxq *hrxq;
13774                         uint32_t hrxq_idx;
13775
13776                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13777                                                     &hrxq_idx);
13778                         if (!hrxq) {
13779                                 rte_flow_error_set
13780                                         (error, rte_errno,
13781                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13782                                          "cannot get hash queue");
13783                                 goto error;
13784                         }
13785                         dh->rix_hrxq = hrxq_idx;
13786                         dv->actions[n++] = hrxq->action;
13787                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13788                         struct mlx5_hrxq *hrxq = NULL;
13789                         uint32_t hrxq_idx;
13790
13791                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13792                                                 rss_desc->shared_rss,
13793                                                 dev_flow->hash_fields);
13794                         if (hrxq_idx)
13795                                 hrxq = mlx5_ipool_get
13796                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13797                                          hrxq_idx);
13798                         if (!hrxq) {
13799                                 rte_flow_error_set
13800                                         (error, rte_errno,
13801                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13802                                          "cannot get hash queue");
13803                                 goto error;
13804                         }
13805                         dh->rix_srss = rss_desc->shared_rss;
13806                         dv->actions[n++] = hrxq->action;
13807                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13808                         if (!priv->sh->default_miss_action) {
13809                                 rte_flow_error_set
13810                                         (error, rte_errno,
13811                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13812                                          "default miss action not created.");
13813                                 goto error;
13814                         }
13815                         dv->actions[n++] = priv->sh->default_miss_action;
13816                 }
13817                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13818                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13819                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13820                                                (void *)&dv->value, n,
13821                                                dv->actions, &dh->drv_flow);
13822                 if (err) {
13823                         rte_flow_error_set
13824                                 (error, errno,
13825                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13826                                 NULL,
13827                                 (!priv->config.allow_duplicate_pattern &&
13828                                 errno == EEXIST) ?
13829                                 "duplicating pattern is not allowed" :
13830                                 "hardware refuses to create flow");
13831                         goto error;
13832                 }
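                      /*
                       * Illustrative: with allow_duplicate_pattern disabled,
                       * inserting a second flow with an identical match in
                       * the same table fails with errno == EEXIST, which the
                       * error path above reports as "duplicating pattern is
                       * not allowed".
                       */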
13833                 if (priv->vmwa_context &&
13834                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13835                         /*
13836                          * The rule contains the VLAN pattern.
13837                          * For VF we are going to create a VLAN
13838                          * interface to make the hypervisor set the
13839                          * correct e-Switch vport context.
13840                          */
13841                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13842                 }
13843         }
13844         return 0;
13845 error:
13846         err = rte_errno; /* Save rte_errno before cleanup. */
13847         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13848                        handle_idx, dh, next) {
13849                 /* hrxq is a union; don't clear it if the flag is not set. */
13850                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13851                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13852                         dh->rix_hrxq = 0;
13853                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13854                         dh->rix_srss = 0;
13855                 }
13856                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13857                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13858         }
13859         rte_errno = err; /* Restore rte_errno. */
13860         return -rte_errno;
13861 }
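      /*
       * Caller-side sketch for flow_dv_apply() above (assumption; the lock
       * name below is hypothetical, per the "lock free" note in the header):
       *
       *     lock(flow_lock);                      // hypothetical caller lock
       *     ret = flow_dv_apply(dev, flow, &error);
       *     unlock(flow_lock);
       */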
13862
13863 void
13864 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
13865                           struct mlx5_list_entry *entry)
13866 {
13867         struct mlx5_flow_dv_matcher *resource = container_of(entry,
13868                                                              typeof(*resource),
13869                                                              entry);
13870
13871         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
13872         mlx5_free(resource);
13873 }
13874
13875 /**
13876  * Release the flow matcher.
13877  *
13878  * @param dev
13879  *   Pointer to Ethernet device.
13880  * @param handle
13881  *   Pointer to the mlx5_flow_handle holding the matcher.
13882  *
13883  * @return
13884  *   1 while a reference on it exists, 0 when freed.
13885  */
13886 static int
13887 flow_dv_matcher_release(struct rte_eth_dev *dev,
13888                         struct mlx5_flow_handle *handle)
13889 {
13890         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13891         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13892                                                             typeof(*tbl), tbl);
13893         int ret;
13894
13895         MLX5_ASSERT(matcher->matcher_object);
13896         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
13897         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13898         return ret;
13899 }
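      /*
       * Reference-counting sketch for the release helpers in this file
       * (illustrative): with hypothetical flows A and B sharing one matcher,
       *
       *     flow_dv_matcher_release(dev, A->handle); // returns 1, kept
       *     flow_dv_matcher_release(dev, B->handle); // returns 0, freed via
       *                                              // the remove callback
       *
       * Each call also drops one reference on the owning flow table.
       */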
13900
13901 void
13902 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13903 {
13904         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13905         struct mlx5_flow_dv_encap_decap_resource *res =
13906                                        container_of(entry, typeof(*res), entry);
13907
13908         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13909         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13910 }
13911
13912 /**
13913  * Release an encap/decap resource.
13914  *
13915  * @param dev
13916  *   Pointer to Ethernet device.
13917  * @param encap_decap_idx
13918  *   Index of encap decap resource.
13919  *
13920  * @return
13921  *   1 while a reference on it exists, 0 when freed.
13922  */
13923 static int
13924 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13925                                      uint32_t encap_decap_idx)
13926 {
13927         struct mlx5_priv *priv = dev->data->dev_private;
13928         struct mlx5_flow_dv_encap_decap_resource *resource;
13929
13930         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13931                                   encap_decap_idx);
13932         if (!resource)
13933                 return 0;
13934         MLX5_ASSERT(resource->action);
13935         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
13936 }
13937
13938 /**
13939  * Release a jump to table action resource.
13940  *
13941  * @param dev
13942  *   Pointer to Ethernet device.
13943  * @param rix_jump
13944  *   Index to the jump action resource.
13945  *
13946  * @return
13947  *   1 while a reference on it exists, 0 when freed.
13948  */
13949 static int
13950 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13951                                   uint32_t rix_jump)
13952 {
13953         struct mlx5_priv *priv = dev->data->dev_private;
13954         struct mlx5_flow_tbl_data_entry *tbl_data;
13955
13956         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13957                                   rix_jump);
13958         if (!tbl_data)
13959                 return 0;
13960         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13961 }
13962
13963 void
13964 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13965 {
13966         struct mlx5_flow_dv_modify_hdr_resource *res =
13967                 container_of(entry, typeof(*res), entry);
13968         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13969
13970         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13971         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
13972 }
13973
13974 /**
13975  * Release a modify-header resource.
13976  *
13977  * @param dev
13978  *   Pointer to Ethernet device.
13979  * @param handle
13980  *   Pointer to mlx5_flow_handle.
13981  *
13982  * @return
13983  *   1 while a reference on it exists, 0 when freed.
13984  */
13985 static int
13986 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13987                                     struct mlx5_flow_handle *handle)
13988 {
13989         struct mlx5_priv *priv = dev->data->dev_private;
13990         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13991
13992         MLX5_ASSERT(entry->action);
13993         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13994 }
13995
13996 void
13997 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13998 {
13999         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14000         struct mlx5_flow_dv_port_id_action_resource *resource =
14001                                   container_of(entry, typeof(*resource), entry);
14002
14003         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14004         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14005 }
14006
14007 /**
14008  * Release port ID action resource.
14009  *
14010  * @param dev
14011  *   Pointer to Ethernet device.
14012  * @param port_id
14013  *   Index to the port ID action resource.
14014  *
14015  * @return
14016  *   1 while a reference on it exists, 0 when freed.
14017  */
14018 static int
14019 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14020                                         uint32_t port_id)
14021 {
14022         struct mlx5_priv *priv = dev->data->dev_private;
14023         struct mlx5_flow_dv_port_id_action_resource *resource;
14024
14025         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14026         if (!resource)
14027                 return 0;
14028         MLX5_ASSERT(resource->action);
14029         return mlx5_list_unregister(priv->sh->port_id_action_list,
14030                                     &resource->entry);
14031 }
14032
14033 /**
14034  * Release shared RSS action resource.
14035  *
14036  * @param dev
14037  *   Pointer to Ethernet device.
14038  * @param srss
14039  *   Shared RSS action index.
14040  */
14041 static void
14042 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14043 {
14044         struct mlx5_priv *priv = dev->data->dev_private;
14045         struct mlx5_shared_action_rss *shared_rss;
14046
14047         shared_rss = mlx5_ipool_get
14048                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14049         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14050 }
14051
14052 void
14053 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14054 {
14055         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14056         struct mlx5_flow_dv_push_vlan_action_resource *resource =
14057                         container_of(entry, typeof(*resource), entry);
14058
14059         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14060         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14061 }
14062
14063 /**
14064  * Release push vlan action resource.
14065  *
14066  * @param dev
14067  *   Pointer to Ethernet device.
14068  * @param handle
14069  *   Pointer to mlx5_flow_handle.
14070  *
14071  * @return
14072  *   1 while a reference on it exists, 0 when freed.
14073  */
14074 static int
14075 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14076                                           struct mlx5_flow_handle *handle)
14077 {
14078         struct mlx5_priv *priv = dev->data->dev_private;
14079         struct mlx5_flow_dv_push_vlan_action_resource *resource;
14080         uint32_t idx = handle->dvh.rix_push_vlan;
14081
14082         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14083         if (!resource)
14084                 return 0;
14085         MLX5_ASSERT(resource->action);
14086         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14087                                     &resource->entry);
14088 }
14089
14090 /**
14091  * Release the fate resource.
14092  *
14093  * @param dev
14094  *   Pointer to Ethernet device.
14095  * @param handle
14096  *   Pointer to mlx5_flow_handle.
14097  */
14098 static void
14099 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14100                                struct mlx5_flow_handle *handle)
14101 {
14102         if (!handle->rix_fate)
14103                 return;
14104         switch (handle->fate_action) {
14105         case MLX5_FLOW_FATE_QUEUE:
14106                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14107                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14108                 break;
14109         case MLX5_FLOW_FATE_JUMP:
14110                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14111                 break;
14112         case MLX5_FLOW_FATE_PORT_ID:
14113                 flow_dv_port_id_action_resource_release(dev,
14114                                 handle->rix_port_id_action);
14115                 break;
14116         default:
14117                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14118                 break;
14119         }
14120         handle->rix_fate = 0;
14121 }
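
/*
 * Illustrative sketch (not compiled), assuming the rix_* fields consulted
 * above alias a single union member (rix_fate) discriminated by
 * fate_action, as the leading rix_fate check implies:
 *
 * @code
 *	handle->fate_action = MLX5_FLOW_FATE_JUMP;
 *	handle->rix_jump = jump_idx;	// shares storage with rix_fate
 *	...
 *	flow_dv_fate_resource_release(dev, handle);	// drops the jump table
 * @endcode
 */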
14122
14123 void
14124 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14125                          struct mlx5_list_entry *entry)
14126 {
14127         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14128                                                               typeof(*resource),
14129                                                               entry);
14130         struct rte_eth_dev *dev = resource->dev;
14131         struct mlx5_priv *priv = dev->data->dev_private;
14132
14133         if (resource->verbs_action)
14134                 claim_zero(mlx5_flow_os_destroy_flow_action
14135                                                       (resource->verbs_action));
14136         if (resource->normal_path_tbl)
14137                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14138                                              resource->normal_path_tbl);
14139         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14140         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14141         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14142 }
14143
14144 /**
14145  * Release a sample resource.
14146  *
14147  * @param dev
14148  *   Pointer to Ethernet device.
14149  * @param handle
14150  *   Pointer to mlx5_flow_handle.
14151  *
14152  * @return
14153  *   1 while a reference on it exists, 0 when freed.
14154  */
14155 static int
14156 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14157                                      struct mlx5_flow_handle *handle)
14158 {
14159         struct mlx5_priv *priv = dev->data->dev_private;
14160         struct mlx5_flow_dv_sample_resource *resource;
14161
14162         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14163                                   handle->dvh.rix_sample);
14164         if (!resource)
14165                 return 0;
14166         MLX5_ASSERT(resource->verbs_action);
14167         return mlx5_list_unregister(priv->sh->sample_action_list,
14168                                     &resource->entry);
14169 }
14170
14171 void
14172 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14173                              struct mlx5_list_entry *entry)
14174 {
14175         struct mlx5_flow_dv_dest_array_resource *resource =
14176                         container_of(entry, typeof(*resource), entry);
14177         struct rte_eth_dev *dev = resource->dev;
14178         struct mlx5_priv *priv = dev->data->dev_private;
14179         uint32_t i = 0;
14180
14181         MLX5_ASSERT(resource->action);
14182         if (resource->action)
14183                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14184         for (; i < resource->num_of_dest; i++)
14185                 flow_dv_sample_sub_actions_release(dev,
14186                                                    &resource->sample_idx[i]);
14187         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14188         DRV_LOG(DEBUG, "destination array resource %p: removed",
14189                 (void *)resource);
14190 }
14191
14192 /**
14193  * Release a destination array resource.
14194  *
14195  * @param dev
14196  *   Pointer to Ethernet device.
14197  * @param handle
14198  *   Pointer to mlx5_flow_handle.
14199  *
14200  * @return
14201  *   1 while a reference on it exists, 0 when freed.
14202  */
14203 static int
14204 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14205                                     struct mlx5_flow_handle *handle)
14206 {
14207         struct mlx5_priv *priv = dev->data->dev_private;
14208         struct mlx5_flow_dv_dest_array_resource *resource;
14209
14210         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14211                                   handle->dvh.rix_dest_array);
14212         if (!resource)
14213                 return 0;
14214         MLX5_ASSERT(resource->action);
14215         return mlx5_list_unregister(priv->sh->dest_array_list,
14216                                     &resource->entry);
14217 }
14218
14219 static void
14220 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14221 {
14222         struct mlx5_priv *priv = dev->data->dev_private;
14223         struct mlx5_dev_ctx_shared *sh = priv->sh;
14224         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14225                                 sh->geneve_tlv_option_resource;
14226         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14227         if (geneve_opt_resource) {
14228                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14229                                          __ATOMIC_RELAXED))) {
14230                         claim_zero(mlx5_devx_cmd_destroy
14231                                         (geneve_opt_resource->obj));
14232                         mlx5_free(sh->geneve_tlv_option_resource);
14233                         sh->geneve_tlv_option_resource = NULL;
14234                 }
14235         }
14236         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14237 }
14238
14239 /**
14240  * Remove the flow from the NIC but keep it in memory.
14241  * Lock free; the mutex should be acquired by the caller.
14242  *
14243  * @param[in] dev
14244  *   Pointer to Ethernet device.
14245  * @param[in, out] flow
14246  *   Pointer to flow structure.
14247  */
14248 static void
14249 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14250 {
14251         struct mlx5_flow_handle *dh;
14252         uint32_t handle_idx;
14253         struct mlx5_priv *priv = dev->data->dev_private;
14254
14255         if (!flow)
14256                 return;
14257         handle_idx = flow->dev_handles;
14258         while (handle_idx) {
14259                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14260                                     handle_idx);
14261                 if (!dh)
14262                         return;
14263                 if (dh->drv_flow) {
14264                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14265                         dh->drv_flow = NULL;
14266                 }
14267                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14268                         flow_dv_fate_resource_release(dev, dh);
14269                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14270                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14271                 handle_idx = dh->next.next;
14272         }
14273 }
14274
14275 /**
14276  * Remove the flow from the NIC and the memory.
14277  * Lock free; the mutex should be acquired by the caller.
14278  *
14279  * @param[in] dev
14280  *   Pointer to the Ethernet device structure.
14281  * @param[in, out] flow
14282  *   Pointer to flow structure.
14283  */
14284 static void
14285 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14286 {
14287         struct mlx5_flow_handle *dev_handle;
14288         struct mlx5_priv *priv = dev->data->dev_private;
14289         struct mlx5_flow_meter_info *fm = NULL;
14290         uint32_t srss = 0;
14291
14292         if (!flow)
14293                 return;
14294         flow_dv_remove(dev, flow);
14295         if (flow->counter) {
14296                 flow_dv_counter_free(dev, flow->counter);
14297                 flow->counter = 0;
14298         }
14299         if (flow->meter) {
14300                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14301                 if (fm)
14302                         mlx5_flow_meter_detach(priv, fm);
14303                 flow->meter = 0;
14304         }
14305         /* Keep the current age handling by default. */
14306         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14307                 flow_dv_aso_ct_release(dev, flow->ct);
14308         else if (flow->age)
14309                 flow_dv_aso_age_release(dev, flow->age);
14310         if (flow->geneve_tlv_option) {
14311                 flow_dv_geneve_tlv_option_resource_release(dev);
14312                 flow->geneve_tlv_option = 0;
14313         }
14314         while (flow->dev_handles) {
14315                 uint32_t tmp_idx = flow->dev_handles;
14316
14317                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14318                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14319                 if (!dev_handle)
14320                         return;
14321                 flow->dev_handles = dev_handle->next.next;
14322                 if (dev_handle->dvh.matcher)
14323                         flow_dv_matcher_release(dev, dev_handle);
14324                 if (dev_handle->dvh.rix_sample)
14325                         flow_dv_sample_resource_release(dev, dev_handle);
14326                 if (dev_handle->dvh.rix_dest_array)
14327                         flow_dv_dest_array_resource_release(dev, dev_handle);
14328                 if (dev_handle->dvh.rix_encap_decap)
14329                         flow_dv_encap_decap_resource_release(dev,
14330                                 dev_handle->dvh.rix_encap_decap);
14331                 if (dev_handle->dvh.modify_hdr)
14332                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14333                 if (dev_handle->dvh.rix_push_vlan)
14334                         flow_dv_push_vlan_action_resource_release(dev,
14335                                                                   dev_handle);
14336                 if (dev_handle->dvh.rix_tag)
14337                         flow_dv_tag_release(dev,
14338                                             dev_handle->dvh.rix_tag);
14339                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14340                         flow_dv_fate_resource_release(dev, dev_handle);
14341                 else if (!srss)
14342                         srss = dev_handle->rix_srss;
14343                 if (fm && dev_handle->is_meter_flow_id &&
14344                     dev_handle->split_flow_id)
14345                         mlx5_ipool_free(fm->flow_ipool,
14346                                         dev_handle->split_flow_id);
14347                 else if (dev_handle->split_flow_id &&
14348                     !dev_handle->is_meter_flow_id)
14349                         mlx5_ipool_free(priv->sh->ipool
14350                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14351                                         dev_handle->split_flow_id);
14352                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14353                            tmp_idx);
14354         }
14355         if (srss)
14356                 flow_dv_shared_rss_action_release(dev, srss);
14357 }
14358
14359 /**
14360  * Release array of hash RX queue objects.
14361  * Helper function.
14362  *
14363  * @param[in] dev
14364  *   Pointer to the Ethernet device structure.
14365  * @param[in, out] hrxqs
14366  *   Array of hash RX queue objects.
14367  *
14368  * @return
14369  *   Total number of references to hash RX queue objects in *hrxqs* array
14370  *   after this operation.
14371  */
14372 static int
14373 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14374                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14375 {
14376         size_t i;
14377         int remaining = 0;
14378
14379         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14380                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14381
14382                 if (!ret)
14383                         (*hrxqs)[i] = 0;
14384                 remaining += ret;
14385         }
14386         return remaining;
14387 }
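
/*
 * Usage sketch (not compiled): the helper returns the number of
 * references still held after the release, so zero means every hrxq in
 * the array was freed:
 *
 * @code
 *	if (__flow_dv_hrxqs_release(dev, &shared_rss->hrxq) == 0) {
 *		// all hash RX queue objects are gone
 *	}
 * @endcode
 */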
14388
14389 /**
14390  * Release all hash RX queue objects representing shared RSS action.
14391  *
14392  * @param[in] dev
14393  *   Pointer to the Ethernet device structure.
14394  * @param[in, out] action
14395  *   Shared RSS action to remove hash RX queue objects from.
14396  *
14397  * @return
14398  *   Total number of references to hash RX queue objects stored in *action*
14399  *   after this operation.
14400  *   Expected to be 0 if no external references are held.
14401  */
14402 static int
14403 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14404                                  struct mlx5_shared_action_rss *shared_rss)
14405 {
14406         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14407 }
14408
14409 /**
14410  * Adjust the L3/L4 hash value of a pre-created shared RSS hrxq according
14411  * to the user input.
14412  *
14413  * Only one hash value is available for each L3+L4 combination.
14414  * For example:
14415  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14416  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
14417  * the same slot in mlx5_rss_hash_fields.
14418  *
14419  * @param[in] rss
14420  *   Pointer to the shared action RSS conf.
14421  * @param[in, out] hash_field
14422  *   The hash_field variable to be adjusted.
14423  *
14424  * @return
14425  *   void
14426  */
14427 static void
14428 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14429                                      uint64_t *hash_field)
14430 {
14431         uint64_t rss_types = rss->origin.types;
14432
14433         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14434         case MLX5_RSS_HASH_IPV4:
14435                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14436                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14437                         if (rss_types & ETH_RSS_L3_DST_ONLY)
14438                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14439                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
14440                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14441                         else
14442                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14443                 }
14444                 return;
14445         case MLX5_RSS_HASH_IPV6:
14446                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14447                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14448                         if (rss_types & ETH_RSS_L3_DST_ONLY)
14449                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14450                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
14451                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14452                         else
14453                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14454                 }
14455                 return;
14456         case MLX5_RSS_HASH_IPV4_UDP:
14457                 /* fall-through. */
14458         case MLX5_RSS_HASH_IPV6_UDP:
14459                 if (rss_types & ETH_RSS_UDP) {
14460                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14461                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14462                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14463                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14464                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14465                         else
14466                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14467                 }
14468                 return;
14469         case MLX5_RSS_HASH_IPV4_TCP:
14470                 /* fall-through. */
14471         case MLX5_RSS_HASH_IPV6_TCP:
14472                 if (rss_types & ETH_RSS_TCP) {
14473                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14474                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14475                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14476                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14477                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14478                         else
14479                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14480                 }
14481                 return;
14482         default:
14483                 return;
14484         }
14485 }
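
/*
 * Worked example (illustrative, not compiled): with user types requesting
 * source-only IPv4 hashing, the pre-created IPv4 slot is narrowed from
 * both addresses to the source address only:
 *
 * @code
 *	uint64_t hf = MLX5_RSS_HASH_IPV4;
 *
 *	rss->origin.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY;
 *	__flow_dv_action_rss_l34_hash_adjust(rss, &hf);
 *	// now hf == IBV_RX_HASH_SRC_IPV4
 * @endcode
 */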
14486
14487 /**
14488  * Set up the shared RSS action.
14489  * Prepare a set of hash RX queue objects sufficient to handle all valid
14490  * hash_fields combinations (see enum ibv_rx_hash_fields).
14491  *
14492  * @param[in] dev
14493  *   Pointer to the Ethernet device structure.
14494  * @param[in] action_idx
14495  *   Shared RSS action ipool index.
14496  * @param[in, out] action
14497  *   Partially initialized shared RSS action.
14498  * @param[out] error
14499  *   Perform verbose error reporting if not NULL. Initialized in case of
14500  *   error only.
14501  *
14502  * @return
14503  *   0 on success, otherwise negative errno value.
14504  */
14505 static int
14506 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14507                            uint32_t action_idx,
14508                            struct mlx5_shared_action_rss *shared_rss,
14509                            struct rte_flow_error *error)
14510 {
14511         struct mlx5_flow_rss_desc rss_desc = { 0 };
14512         size_t i;
14513         int err;
14514
14515         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14516                 return rte_flow_error_set(error, rte_errno,
14517                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14518                                           "cannot setup indirection table");
14519         }
14520         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14521         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14522         rss_desc.const_q = shared_rss->origin.queue;
14523         rss_desc.queue_num = shared_rss->origin.queue_num;
14524         /* Set non-zero value to indicate a shared RSS. */
14525         rss_desc.shared_rss = action_idx;
14526         rss_desc.ind_tbl = shared_rss->ind_tbl;
14527         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14528                 uint32_t hrxq_idx;
14529                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14530                 int tunnel = 0;
14531
14532                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14533                 if (shared_rss->origin.level > 1) {
14534                         hash_fields |= IBV_RX_HASH_INNER;
14535                         tunnel = 1;
14536                 }
14537                 rss_desc.tunnel = tunnel;
14538                 rss_desc.hash_fields = hash_fields;
14539                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14540                 if (!hrxq_idx) {
14541                         rte_flow_error_set
14542                                 (error, rte_errno,
14543                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14544                                  "cannot get hash queue");
14545                         goto error_hrxq_new;
14546                 }
14547                 err = __flow_dv_action_rss_hrxq_set
14548                         (shared_rss, hash_fields, hrxq_idx);
14549                 MLX5_ASSERT(!err);
14550         }
14551         return 0;
14552 error_hrxq_new:
14553         err = rte_errno;
14554         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14555         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14556                 shared_rss->ind_tbl = NULL;
14557         rte_errno = err;
14558         return -rte_errno;
14559 }
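
/*
 * After a successful setup the shared action caches one hrxq per entry of
 * mlx5_rss_hash_fields[] (with the L3/L4 adjustment above applied), so
 * flows referencing the action later pick a queue object by hash fields
 * without allocating anything. Illustrative invariant (not compiled,
 * assuming every slot was populated as asserted above):
 *
 * @code
 *	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++)
 *		MLX5_ASSERT(shared_rss->hrxq[i] != 0);
 * @endcode
 */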
14560
14561 /**
14562  * Create shared RSS action.
14563  *
14564  * @param[in] dev
14565  *   Pointer to the Ethernet device structure.
14566  * @param[in] conf
14567  *   Shared action configuration.
14568  * @param[in] rss
14569  *   RSS action specification used to create shared action.
14570  * @param[out] error
14571  *   Perform verbose error reporting if not NULL. Initialized in case of
14572  *   error only.
14573  *
14574  * @return
14575  *   A valid shared action ID in case of success, 0 otherwise and
14576  *   rte_errno is set.
14577  */
14578 static uint32_t
14579 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14580                             const struct rte_flow_indir_action_conf *conf,
14581                             const struct rte_flow_action_rss *rss,
14582                             struct rte_flow_error *error)
14583 {
14584         struct mlx5_priv *priv = dev->data->dev_private;
14585         struct mlx5_shared_action_rss *shared_rss = NULL;
14586         void *queue = NULL;
14587         struct rte_flow_action_rss *origin;
14588         const uint8_t *rss_key;
14589         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14590         uint32_t idx;
14591
14592         RTE_SET_USED(conf);
14593         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14594                             0, SOCKET_ID_ANY);
14595         shared_rss = mlx5_ipool_zmalloc
14596                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14597         if (!shared_rss || !queue) {
14598                 rte_flow_error_set(error, ENOMEM,
14599                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14600                                    "cannot allocate resource memory");
14601                 goto error_rss_init;
14602         }
14603         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14604                 rte_flow_error_set(error, E2BIG,
14605                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14606                                    "rss action number out of range");
14607                 goto error_rss_init;
14608         }
14609         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14610                                           sizeof(*shared_rss->ind_tbl),
14611                                           0, SOCKET_ID_ANY);
14612         if (!shared_rss->ind_tbl) {
14613                 rte_flow_error_set(error, ENOMEM,
14614                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14615                                    "cannot allocate resource memory");
14616                 goto error_rss_init;
14617         }
14618         memcpy(queue, rss->queue, queue_size);
14619         shared_rss->ind_tbl->queues = queue;
14620         shared_rss->ind_tbl->queues_n = rss->queue_num;
14621         origin = &shared_rss->origin;
14622         origin->func = rss->func;
14623         origin->level = rss->level;
14624         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14625         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14626         /* NULL RSS key indicates default RSS key. */
14627         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14628         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14629         origin->key = &shared_rss->key[0];
14630         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14631         origin->queue = queue;
14632         origin->queue_num = rss->queue_num;
14633         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14634                 goto error_rss_init;
14635         rte_spinlock_init(&shared_rss->action_rss_sl);
14636         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14637         rte_spinlock_lock(&priv->shared_act_sl);
14638         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14639                      &priv->rss_shared_actions, idx, shared_rss, next);
14640         rte_spinlock_unlock(&priv->shared_act_sl);
14641         return idx;
14642 error_rss_init:
14643         if (shared_rss) {
14644                 if (shared_rss->ind_tbl)
14645                         mlx5_free(shared_rss->ind_tbl);
14646                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14647                                 idx);
14648         }
14649         if (queue)
14650                 mlx5_free(queue);
14651         return 0;
14652 }
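
/*
 * Illustrative sketch (not compiled) of how an application reaches
 * __flow_dv_action_rss_create() through the public rte_flow API; the
 * queue array and port_id below are hypothetical example values:
 *
 * @code
 *	uint16_t queues[2] = { 0, 1 };
 *	struct rte_flow_action_rss rss_conf = {
 *		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *		.level = 0,
 *		.types = ETH_RSS_IP,
 *		.key = NULL,		// NULL selects the default RSS key
 *		.key_len = 0,
 *		.queue = queues,
 *		.queue_num = RTE_DIM(queues),
 *	};
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss_conf,
 *	};
 *	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_error error;
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_action_handle_create(port_id, &conf, &action, &error);
 * @endcode
 */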
14653
14654 /**
14655  * Destroy the shared RSS action.
14656  * Release related hash RX queue objects.
14657  *
14658  * @param[in] dev
14659  *   Pointer to the Ethernet device structure.
14660  * @param[in] idx
14661  *   The shared RSS action object ID to be removed.
14662  * @param[out] error
14663  *   Perform verbose error reporting if not NULL. Initialized in case of
14664  *   error only.
14665  *
14666  * @return
14667  *   0 on success, otherwise negative errno value.
14668  */
14669 static int
14670 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14671                              struct rte_flow_error *error)
14672 {
14673         struct mlx5_priv *priv = dev->data->dev_private;
14674         struct mlx5_shared_action_rss *shared_rss =
14675             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14676         uint32_t old_refcnt = 1;
14677         int remaining;
14678         uint16_t *queue = NULL;
14679
14680         if (!shared_rss)
14681                 return rte_flow_error_set(error, EINVAL,
14682                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14683                                           "invalid shared action");
14684         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14685         if (remaining)
14686                 return rte_flow_error_set(error, EBUSY,
14687                                           RTE_FLOW_ERROR_TYPE_ACTION,
14688                                           NULL,
14689                                           "shared rss hrxq has references");
14690         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14691                                          0, 0, __ATOMIC_ACQUIRE,
14692                                          __ATOMIC_RELAXED))
14693                 return rte_flow_error_set(error, EBUSY,
14694                                           RTE_FLOW_ERROR_TYPE_ACTION,
14695                                           NULL,
14696                                           "shared rss has references");
14697         queue = shared_rss->ind_tbl->queues;
14698         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14699         if (remaining)
14700                 return rte_flow_error_set(error, EBUSY,
14701                                           RTE_FLOW_ERROR_TYPE_ACTION,
14702                                           NULL,
14703                                           "shared rss indirection table has"
14704                                           " references");
14705         mlx5_free(queue);
14706         rte_spinlock_lock(&priv->shared_act_sl);
14707         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14708                      &priv->rss_shared_actions, idx, shared_rss, next);
14709         rte_spinlock_unlock(&priv->shared_act_sl);
14710         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14711                         idx);
14712         return 0;
14713 }
14714
14715 /**
14716  * Create an indirect action, lock free;
14717  * the mutex should be acquired by the caller.
14718  * Dispatcher for action type specific call.
14719  *
14720  * @param[in] dev
14721  *   Pointer to the Ethernet device structure.
14722  * @param[in] conf
14723  *   Shared action configuration.
14724  * @param[in] action
14725  *   Action specification used to create indirect action.
14726  * @param[out] error
14727  *   Perform verbose error reporting if not NULL. Initialized in case of
14728  *   error only.
14729  *
14730  * @return
14731  *   A valid shared action handle in case of success, NULL otherwise and
14732  *   rte_errno is set.
14733  */
14734 static struct rte_flow_action_handle *
14735 flow_dv_action_create(struct rte_eth_dev *dev,
14736                       const struct rte_flow_indir_action_conf *conf,
14737                       const struct rte_flow_action *action,
14738                       struct rte_flow_error *err)
14739 {
14740         struct mlx5_priv *priv = dev->data->dev_private;
14741         uint32_t age_idx = 0;
14742         uint32_t idx = 0;
14743         uint32_t ret = 0;
14744
14745         switch (action->type) {
14746         case RTE_FLOW_ACTION_TYPE_RSS:
14747                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14748                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14749                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14750                 break;
14751         case RTE_FLOW_ACTION_TYPE_AGE:
14752                 age_idx = flow_dv_aso_age_alloc(dev, err);
14753                 if (!age_idx) {
14754                         ret = -rte_errno;
14755                         break;
14756                 }
14757                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14758                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14759                 flow_dv_aso_age_params_init(dev, age_idx,
14760                                         ((const struct rte_flow_action_age *)
14761                                                 action->conf)->context ?
14762                                         ((const struct rte_flow_action_age *)
14763                                                 action->conf)->context :
14764                                         (void *)(uintptr_t)idx,
14765                                         ((const struct rte_flow_action_age *)
14766                                                 action->conf)->timeout);
14767                 ret = age_idx;
14768                 break;
14769         case RTE_FLOW_ACTION_TYPE_COUNT:
14770                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14771                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14772                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14773                 break;
14774         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14775                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14776                                                          err);
14777                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14778                 break;
14779         default:
14780                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14781                                    NULL, "action type not supported");
14782                 break;
14783         }
14784         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14785 }
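
/*
 * Note that the returned "handle" is not a pointer to memory: it is the
 * 32-bit value built above, packing the action type into the high bits
 * and the per-type index into the low ones (the conntrack type also
 * encodes the owner port). Decoding mirrors flow_dv_action_destroy():
 *
 * @code
 *	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 *	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *	uint32_t idx = act_idx &
 *		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 * @endcode
 */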
14786
14787 /**
14788  * Destroy the indirect action.
14789  * Release action related resources on the NIC and the memory.
14790  * Lock free; the mutex should be acquired by the caller.
14791  * Dispatcher for action type specific call.
14792  *
14793  * @param[in] dev
14794  *   Pointer to the Ethernet device structure.
14795  * @param[in] handle
14796  *   The indirect action object handle to be removed.
14797  * @param[out] error
14798  *   Perform verbose error reporting if not NULL. Initialized in case of
14799  *   error only.
14800  *
14801  * @return
14802  *   0 on success, otherwise negative errno value.
14803  */
14804 static int
14805 flow_dv_action_destroy(struct rte_eth_dev *dev,
14806                        struct rte_flow_action_handle *handle,
14807                        struct rte_flow_error *error)
14808 {
14809         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14810         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14811         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14812         struct mlx5_flow_counter *cnt;
14813         uint32_t no_flow_refcnt = 1;
14814         int ret;
14815
14816         switch (type) {
14817         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14818                 return __flow_dv_action_rss_release(dev, idx, error);
14819         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14820                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14821                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14822                                                  &no_flow_refcnt, 1, false,
14823                                                  __ATOMIC_ACQUIRE,
14824                                                  __ATOMIC_RELAXED))
14825                         return rte_flow_error_set(error, EBUSY,
14826                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14827                                                   NULL,
14828                                                   "Indirect count action has references");
14829                 flow_dv_counter_free(dev, idx);
14830                 return 0;
14831         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14832                 ret = flow_dv_aso_age_release(dev, idx);
14833                 if (ret)
14834                         /*
14835                          * In this case, the last flow holding a reference
14836                          * will actually release the age action.
14837                          */
14838                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14839                                 " released with references %d.", idx, ret);
14840                 return 0;
14841         case MLX5_INDIRECT_ACTION_TYPE_CT:
14842                 ret = flow_dv_aso_ct_release(dev, idx);
14843                 if (ret < 0)
14844                         return ret;
14845                 if (ret > 0)
14846                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14847                                 "has references %d.", idx, ret);
14848                 return 0;
14849         default:
14850                 return rte_flow_error_set(error, ENOTSUP,
14851                                           RTE_FLOW_ERROR_TYPE_ACTION,
14852                                           NULL,
14853                                           "action type not supported");
14854         }
14855 }
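
/*
 * Application-level counterpart (illustrative, not compiled); port_id and
 * handle are assumed to come from an earlier
 * rte_flow_action_handle_create() call:
 *
 * @code
 *	struct rte_flow_error error;
 *
 *	if (rte_flow_action_handle_destroy(port_id, handle, &error))
 *		printf("indirect action destroy failed: %s\n",
 *		       error.message ? error.message : "(none)");
 * @endcode
 */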
14856
14857 /**
14858  * Update the shared RSS action configuration in place.
14859  *
14860  * @param[in] dev
14861  *   Pointer to the Ethernet device structure.
14862  * @param[in] idx
14863  *   The shared RSS action object ID to be updated.
14864  * @param[in] action_conf
14865  *   RSS action specification used to modify *shared_rss*.
14866  * @param[out] error
14867  *   Perform verbose error reporting if not NULL. Initialized in case of
14868  *   error only.
14869  *
14870  * @return
14871  *   0 on success, otherwise negative errno value.
14872  * @note: currently only support update of RSS queues.
14873  */
14874 static int
14875 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14876                             const struct rte_flow_action_rss *action_conf,
14877                             struct rte_flow_error *error)
14878 {
14879         struct mlx5_priv *priv = dev->data->dev_private;
14880         struct mlx5_shared_action_rss *shared_rss =
14881             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14882         int ret = 0;
14883         void *queue = NULL;
14884         uint16_t *queue_old = NULL;
14885         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
14886
14887         if (!shared_rss)
14888                 return rte_flow_error_set(error, EINVAL,
14889                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14890                                           "invalid shared action to update");
14891         if (priv->obj_ops.ind_table_modify == NULL)
14892                 return rte_flow_error_set(error, ENOTSUP,
14893                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14894                                           "cannot modify indirection table");
14895         queue = mlx5_malloc(MLX5_MEM_ZERO,
14896                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14897                             0, SOCKET_ID_ANY);
14898         if (!queue)
14899                 return rte_flow_error_set(error, ENOMEM,
14900                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14901                                           NULL,
14902                                           "cannot allocate resource memory");
14903         memcpy(queue, action_conf->queue, queue_size);
14904         MLX5_ASSERT(shared_rss->ind_tbl);
14905         rte_spinlock_lock(&shared_rss->action_rss_sl);
14906         queue_old = shared_rss->ind_tbl->queues;
14907         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
14908                                         queue, action_conf->queue_num, true);
14909         if (ret) {
14910                 mlx5_free(queue);
14911                 ret = rte_flow_error_set(error, rte_errno,
14912                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14913                                           "cannot update indirection table");
14914         } else {
14915                 mlx5_free(queue_old);
14916                 shared_rss->origin.queue = queue;
14917                 shared_rss->origin.queue_num = action_conf->queue_num;
14918         }
14919         rte_spinlock_unlock(&shared_rss->action_rss_sl);
14920         return ret;
14921 }
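
/*
 * Illustrative sketch (not compiled): updating the queue set of a shared
 * RSS action from the application. For the RSS type the update payload is
 * a struct rte_flow_action, as the flow_dv_action_update() dispatcher
 * expects; new_queues, port_id and handle are hypothetical:
 *
 * @code
 *	uint16_t new_queues[4] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss_conf = {
 *		.queue = new_queues,
 *		.queue_num = RTE_DIM(new_queues),
 *	};
 *	const struct rte_flow_action update = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss_conf,
 *	};
 *	ret = rte_flow_action_handle_update(port_id, handle, &update, &error);
 * @endcode
 */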
14922
14923 /**
14924  * Update the conntrack context or direction in place.
14925  * Context update should be synchronized.
14926  *
14927  * @param[in] dev
14928  *   Pointer to the Ethernet device structure.
14929  * @param[in] idx
14930  *   The conntrack object ID to be updated.
14931  * @param[in] update
14932  *   Pointer to the structure of information to update.
14933  * @param[out] error
14934  *   Perform verbose error reporting if not NULL. Initialized in case of
14935  *   error only.
14936  *
14937  * @return
14938  *   0 on success, otherwise negative errno value.
14939  */
14940 static int
14941 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14942                            const struct rte_flow_modify_conntrack *update,
14943                            struct rte_flow_error *error)
14944 {
14945         struct mlx5_priv *priv = dev->data->dev_private;
14946         struct mlx5_aso_ct_action *ct;
14947         const struct rte_flow_action_conntrack *new_prf;
14948         int ret = 0;
14949         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14950         uint32_t dev_idx;
14951
14952         if (PORT_ID(priv) != owner)
14953                 return rte_flow_error_set(error, EACCES,
14954                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14955                                           NULL,
14956                                           "CT object owned by another port");
14957         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14958         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14959         if (!ct->refcnt)
14960                 return rte_flow_error_set(error, ENOMEM,
14961                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14962                                           NULL,
14963                                           "CT object is inactive");
14964         new_prf = &update->new_ct;
14965         if (update->direction)
14966                 ct->is_original = !!new_prf->is_original_dir;
14967         if (update->state) {
14968                 /* Only validate the profile when it needs to be updated. */
14969                 ret = mlx5_validate_action_ct(dev, new_prf, error);
14970                 if (ret)
14971                         return ret;
14972                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14973                 if (ret)
14974                         return rte_flow_error_set(error, EIO,
14975                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14976                                         NULL,
14977                                         "Failed to send CT context update WQE");
14978                 /* Block until ready or a failure. */
14979                 ret = mlx5_aso_ct_available(priv->sh, ct);
14980                 if (ret)
14981                         rte_flow_error_set(error, rte_errno,
14982                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14983                                            NULL,
14984                                            "Timeout to get the CT update");
14985         }
14986         return ret;
14987 }
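
/*
 * Illustrative sketch (not compiled): a conntrack update that refreshes
 * the tracking state while keeping the direction. Only the parts selected
 * by the direction/state bits are applied; new_profile is assumed to be a
 * populated struct rte_flow_action_conntrack:
 *
 * @code
 *	struct rte_flow_modify_conntrack mod = {
 *		.new_ct = new_profile,
 *		.direction = 0,
 *		.state = 1,
 *	};
 *	ret = rte_flow_action_handle_update(port_id, ct_handle, &mod, &error);
 * @endcode
 */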
14988
14989 /**
14990  * Update the shared action configuration in place, lock free;
14991  * the mutex should be acquired by the caller.
14992  *
14993  * @param[in] dev
14994  *   Pointer to the Ethernet device structure.
14995  * @param[in] handle
14996  *   The indirect action object handle to be updated.
14997  * @param[in] update
14998  *   Action specification used to modify the action pointed by *handle*.
14999  *   *update* could be of same type with the action pointed by the *handle*
15000  *   handle argument, or some other structures like a wrapper, depending on
15001  *   the indirect action type.
15002  * @param[out] error
15003  *   Perform verbose error reporting if not NULL. Initialized in case of
15004  *   error only.
15005  *
15006  * @return
15007  *   0 on success, otherwise negative errno value.
15008  */
15009 static int
15010 flow_dv_action_update(struct rte_eth_dev *dev,
15011                         struct rte_flow_action_handle *handle,
15012                         const void *update,
15013                         struct rte_flow_error *err)
15014 {
15015         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15016         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15017         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15018         const void *action_conf;
15019
15020         switch (type) {
15021         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15022                 action_conf = ((const struct rte_flow_action *)update)->conf;
15023                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15024         case MLX5_INDIRECT_ACTION_TYPE_CT:
15025                 return __flow_dv_action_ct_update(dev, idx, update, err);
15026         default:
15027                 return rte_flow_error_set(err, ENOTSUP,
15028                                           RTE_FLOW_ERROR_TYPE_ACTION,
15029                                           NULL,
15030                                           "action type update not supported");
15031         }
15032 }
15033
15034 /**
15035  * Destroy the meter sub-policy table rules.
15036  * Lock free; the mutex should be acquired by the caller.
15037  *
15038  * @param[in] dev
15039  *   Pointer to Ethernet device.
15040  * @param[in] sub_policy
15041  *   Pointer to meter sub policy table.
15042  */
15043 static void
15044 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15045                              struct mlx5_flow_meter_sub_policy *sub_policy)
15046 {
15047         struct mlx5_priv *priv = dev->data->dev_private;
15048         struct mlx5_flow_tbl_data_entry *tbl;
15049         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15050         struct mlx5_flow_meter_info *next_fm;
15051         struct mlx5_sub_policy_color_rule *color_rule;
15052         void *tmp;
15053         uint32_t i;
15054
15055         for (i = 0; i < RTE_COLORS; i++) {
15056                 next_fm = NULL;
15057                 if (i == RTE_COLOR_GREEN && policy &&
15058                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15059                         next_fm = mlx5_flow_meter_find(priv,
15060                                         policy->act_cnt[i].next_mtr_id, NULL);
15061                 TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15062                                    next_port, tmp) {
15063                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15064                         tbl = container_of(color_rule->matcher->tbl,
15065                                         typeof(*tbl), tbl);
15066                         mlx5_list_unregister(tbl->matchers,
15067                                                 &color_rule->matcher->entry);
15068                         TAILQ_REMOVE(&sub_policy->color_rules[i],
15069                                         color_rule, next_port);
15070                         mlx5_free(color_rule);
15071                         if (next_fm)
15072                                 mlx5_flow_meter_detach(priv, next_fm);
15073                 }
15074         }
15075         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15076                 if (sub_policy->rix_hrxq[i]) {
15077                         if (policy && !policy->is_hierarchy)
15078                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15079                         sub_policy->rix_hrxq[i] = 0;
15080                 }
15081                 if (sub_policy->jump_tbl[i]) {
15082                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15083                         sub_policy->jump_tbl[i]);
15084                         sub_policy->jump_tbl[i] = NULL;
15085                 }
15086         }
15087         if (sub_policy->tbl_rsc) {
15088                 flow_dv_tbl_resource_release(MLX5_SH(dev),
15089                         sub_policy->tbl_rsc);
15090                 sub_policy->tbl_rsc = NULL;
15091         }
15092 }
15093
15094 /**
15095  * Destroy policy rules, lock free;
15096  * the mutex should be acquired by the caller.
15097  * Iterates over all domains and their sub-policies.
15098  *
15099  * @param[in] dev
15100  *   Pointer to the Ethernet device structure.
15101  * @param[in] mtr_policy
15102  *   Meter policy struct.
15103  */
15104 static void
15105 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15106                       struct mlx5_flow_meter_policy *mtr_policy)
15107 {
15108         uint32_t i, j;
15109         struct mlx5_flow_meter_sub_policy *sub_policy;
15110         uint16_t sub_policy_num;
15111
15112         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15113                 sub_policy_num = (mtr_policy->sub_policy_num >>
15114                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15115                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15116                 for (j = 0; j < sub_policy_num; j++) {
15117                         sub_policy = mtr_policy->sub_policys[i][j];
15118                         if (sub_policy)
15119                                 __flow_dv_destroy_sub_policy_rules
15120                                                 (dev, sub_policy);
15121                 }
15122         }
15123 }
15124
15125 /**
15126  * Destroy policy actions, lock free;
15127  * the mutex should be acquired by the caller.
15128  * Dispatcher for action type specific call.
15129  *
15130  * @param[in] dev
15131  *   Pointer to the Ethernet device structure.
15132  * @param[in] mtr_policy
15133  *   Meter policy struct.
15134  */
15135 static void
15136 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15137                       struct mlx5_flow_meter_policy *mtr_policy)
15138 {
15139         struct rte_flow_action *rss_action;
15140         struct mlx5_flow_handle dev_handle;
15141         uint32_t i, j;
15142
15143         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15144                 if (mtr_policy->act_cnt[i].rix_mark) {
15145                         flow_dv_tag_release(dev,
15146                                 mtr_policy->act_cnt[i].rix_mark);
15147                         mtr_policy->act_cnt[i].rix_mark = 0;
15148                 }
15149                 if (mtr_policy->act_cnt[i].modify_hdr) {
15150                         dev_handle.dvh.modify_hdr =
15151                                 mtr_policy->act_cnt[i].modify_hdr;
15152                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15153                 }
15154                 switch (mtr_policy->act_cnt[i].fate_action) {
15155                 case MLX5_FLOW_FATE_SHARED_RSS:
15156                         rss_action = mtr_policy->act_cnt[i].rss;
15157                         mlx5_free(rss_action);
15158                         break;
15159                 case MLX5_FLOW_FATE_PORT_ID:
15160                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15161                                 flow_dv_port_id_action_resource_release(dev,
15162                                 mtr_policy->act_cnt[i].rix_port_id_action);
15163                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15164                         }
15165                         break;
15166                 case MLX5_FLOW_FATE_DROP:
15167                 case MLX5_FLOW_FATE_JUMP:
15168                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15169                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15170                                                 NULL;
15171                         break;
15172                 default:
15173                         /* Queue action: do nothing. */
15174                         break;
15175                 }
15176         }
15177         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15178                 mtr_policy->dr_drop_action[j] = NULL;
15179 }
15180
15181 /**
15182  * Create policy actions per domain, lock free;
15183  * the mutex should be acquired by the caller.
15184  * Dispatcher for action type specific call.
15185  *
15186  * @param[in] dev
15187  *   Pointer to the Ethernet device structure.
15188  * @param[in] mtr_policy
15189  *   Meter policy struct.
15190  * @param[in] action
15191  *   Action specification used to create meter actions.
15192  * @param[out] error
15193  *   Perform verbose error reporting if not NULL. Initialized in case of
15194  *   error only.
15195  *
15196  * @return
15197  *   0 on success, otherwise negative errno value.
15198  */
15199 static int
15200 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15201                         struct mlx5_flow_meter_policy *mtr_policy,
15202                         const struct rte_flow_action *actions[RTE_COLORS],
15203                         enum mlx5_meter_domain domain,
15204                         struct rte_mtr_error *error)
15205 {
15206         struct mlx5_priv *priv = dev->data->dev_private;
15207         struct rte_flow_error flow_err;
15208         const struct rte_flow_action *act;
15209         uint64_t action_flags;
15210         struct mlx5_flow_handle dh;
15211         struct mlx5_flow dev_flow;
15212         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15213         int i, ret;
15214         uint8_t egress, transfer;
15215         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15216         union {
15217                 struct mlx5_flow_dv_modify_hdr_resource res;
15218                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15219                             sizeof(struct mlx5_modification_cmd) *
15220                             (MLX5_MAX_MODIFY_NUM + 1)];
15221         } mhdr_dummy;
15222         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15223
15224         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15225         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15226         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15227         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15228         memset(&port_id_action, 0,
15229                sizeof(struct mlx5_flow_dv_port_id_action_resource));
15230         memset(mhdr_res, 0, sizeof(*mhdr_res));
15231         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15232                                        (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15233                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15234         dev_flow.handle = &dh;
15235         dev_flow.dv.port_id_action = &port_id_action;
15236         dev_flow.external = true;
15237         for (i = 0; i < RTE_COLORS; i++) {
15238                 if (i < MLX5_MTR_RTE_COLORS)
15239                         act_cnt = &mtr_policy->act_cnt[i];
15240                 action_flags = 0;
15241                 for (act = actions[i];
15242                      act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15243                         switch (act->type) {
15244                         case RTE_FLOW_ACTION_TYPE_MARK:
15245                         {
15246                                 uint32_t tag_be = mlx5_flow_mark_set
15247                                         (((const struct rte_flow_action_mark *)
15248                                         (act->conf))->id);
15249
15250                                 if (i >= MLX5_MTR_RTE_COLORS)
15251                                         return -rte_mtr_error_set(error,
15252                                           ENOTSUP,
15253                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15254                                           NULL,
15255                                           "cannot create policy "
15256                                           "mark action for this color");
15257                                 dev_flow.handle->mark = 1;
15258                                 if (flow_dv_tag_resource_register(dev, tag_be,
15259                                                   &dev_flow, &flow_err))
15260                                         return -rte_mtr_error_set(error,
15261                                         ENOTSUP,
15262                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15263                                         NULL,
15264                                         "cannot setup policy mark action");
15265                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15266                                 act_cnt->rix_mark =
15267                                         dev_flow.handle->dvh.rix_tag;
15268                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15269                                 break;
15270                         }
15271                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15272                                 if (i >= MLX5_MTR_RTE_COLORS)
15273                                         return -rte_mtr_error_set(error,
15274                                           ENOTSUP,
15275                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15276                                           NULL,
15277                                           "cannot create policy "
15278                                           "set tag action for this color");
15279                                 if (flow_dv_convert_action_set_tag
15280                                 (dev, mhdr_res,
15281                                 (const struct rte_flow_action_set_tag *)
15282                                 act->conf,  &flow_err))
15283                                         return -rte_mtr_error_set(error,
15284                                         ENOTSUP,
15285                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15286                                         NULL, "cannot convert policy "
15287                                         "set tag action");
15288                                 if (!mhdr_res->actions_num)
15289                                         return -rte_mtr_error_set(error,
15290                                         ENOTSUP,
15291                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15292                                         NULL, "cannot find policy "
15293                                         "set tag action");
15294                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15295                                 break;
15296                         case RTE_FLOW_ACTION_TYPE_DROP:
15297                         {
15298                                 struct mlx5_flow_mtr_mng *mtrmng =
15299                                                 priv->sh->mtrmng;
15300                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15301
15302                                 /*
15303                                  * Create the drop table with
15304                                  * METER DROP level.
15305                                  */
15306                                 if (!mtrmng->drop_tbl[domain]) {
15307                                         mtrmng->drop_tbl[domain] =
15308                                         flow_dv_tbl_resource_get(dev,
15309                                         MLX5_FLOW_TABLE_LEVEL_METER,
15310                                         egress, transfer, false, NULL, 0,
15311                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15312                                         if (!mtrmng->drop_tbl[domain])
15313                                                 return -rte_mtr_error_set
15314                                         (error, ENOTSUP,
15315                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15316                                         NULL,
15317                                         "Failed to create meter drop table");
15318                                 }
15319                                 tbl_data = container_of
15320                                 (mtrmng->drop_tbl[domain],
15321                                 struct mlx5_flow_tbl_data_entry, tbl);
15322                                 if (i < MLX5_MTR_RTE_COLORS) {
15323                                         act_cnt->dr_jump_action[domain] =
15324                                                 tbl_data->jump.action;
15325                                         act_cnt->fate_action =
15326                                                 MLX5_FLOW_FATE_DROP;
15327                                 }
15328                                 if (i == RTE_COLOR_RED)
15329                                         mtr_policy->dr_drop_action[domain] =
15330                                                 tbl_data->jump.action;
15331                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15332                                 break;
15333                         }
15334                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15335                         {
15336                                 if (i >= MLX5_MTR_RTE_COLORS)
15337                                         return -rte_mtr_error_set(error,
15338                                         ENOTSUP,
15339                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15340                                         NULL, "cannot create policy "
15341                                         "fate queue for this color");
15342                                 act_cnt->queue =
15343                                 ((const struct rte_flow_action_queue *)
15344                                         (act->conf))->index;
15345                                 act_cnt->fate_action =
15346                                         MLX5_FLOW_FATE_QUEUE;
15347                                 dev_flow.handle->fate_action =
15348                                         MLX5_FLOW_FATE_QUEUE;
15349                                 mtr_policy->is_queue = 1;
15350                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15351                                 break;
15352                         }
15353                         case RTE_FLOW_ACTION_TYPE_RSS:
15354                         {
15355                                 int rss_size;
15356
15357                                 if (i >= MLX5_MTR_RTE_COLORS)
15358                                         return -rte_mtr_error_set(error,
15359                                           ENOTSUP,
15360                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15361                                           NULL,
15362                                           "cannot create policy "
15363                                           "rss action for this color");
15364                                 /*
15365                                  * Save RSS conf into policy struct
15366                                  * for translate stage.
15367                                  */
15368                                 rss_size = (int)rte_flow_conv
15369                                         (RTE_FLOW_CONV_OP_ACTION,
15370                                         NULL, 0, act, &flow_err);
15371                                 if (rss_size <= 0)
15372                                         return -rte_mtr_error_set(error,
15373                                           ENOTSUP,
15374                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15375                                           NULL, "Get the wrong "
15376                                           "rss action struct size");
15377                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15378                                                 rss_size, 0, SOCKET_ID_ANY);
15379                                 if (!act_cnt->rss)
15380                                         return -rte_mtr_error_set(error,
15381                                           ENOTSUP,
15382                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15383                                           NULL,
15384                                           "Fail to malloc rss action memory");
15385                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15386                                         act_cnt->rss, rss_size,
15387                                         act, &flow_err);
15388                                 if (ret < 0)
15389                                         return -rte_mtr_error_set(error,
15390                                           ENOTSUP,
15391                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15392                                           NULL, "Fail to save "
15393                                           "rss action into policy struct");
15394                                 act_cnt->fate_action =
15395                                         MLX5_FLOW_FATE_SHARED_RSS;
15396                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15397                                 break;
15398                         }
15399                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15400                         {
15401                                 struct mlx5_flow_dv_port_id_action_resource
15402                                         port_id_resource;
15403                                 uint32_t port_id = 0;
15404
15405                                 if (i >= MLX5_MTR_RTE_COLORS)
15406                                         return -rte_mtr_error_set(error,
15407                                         ENOTSUP,
15408                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15409                                         NULL, "cannot create policy "
15410                                         "port action for this color");
15411                                 memset(&port_id_resource, 0,
15412                                         sizeof(port_id_resource));
15413                                 if (flow_dv_translate_action_port_id(dev, act,
15414                                                 &port_id, &flow_err))
15415                                         return -rte_mtr_error_set(error,
15416                                         ENOTSUP,
15417                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15418                                         NULL, "cannot translate "
15419                                         "policy port action");
15420                                 port_id_resource.port_id = port_id;
15421                                 if (flow_dv_port_id_action_resource_register
15422                                         (dev, &port_id_resource,
15423                                         &dev_flow, &flow_err))
15424                                         return -rte_mtr_error_set(error,
15425                                         ENOTSUP,
15426                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15427                                         NULL, "cannot setup "
15428                                         "policy port action");
15429                                 act_cnt->rix_port_id_action =
15430                                         dev_flow.handle->rix_port_id_action;
15431                                 act_cnt->fate_action =
15432                                         MLX5_FLOW_FATE_PORT_ID;
15433                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15434                                 break;
15435                         }
15436                         case RTE_FLOW_ACTION_TYPE_JUMP:
15437                         {
15438                                 uint32_t jump_group = 0;
15439                                 uint32_t table = 0;
15440                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15441                                 struct flow_grp_info grp_info = {
15442                                         .external = !!dev_flow.external,
15443                                         .transfer = !!transfer,
15444                                         .fdb_def_rule = !!priv->fdb_def_rule,
15445                                         .std_tbl_fix = 0,
15446                                         .skip_scale = dev_flow.skip_scale &
15447                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15448                                 };
15449                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15450                                         mtr_policy->sub_policys[domain][0];
15451
15452                                 if (i >= MLX5_MTR_RTE_COLORS)
15453                                         return -rte_mtr_error_set(error,
15454                                           ENOTSUP,
15455                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15456                                           NULL,
15457                                           "cannot create policy "
15458                                           "jump action for this color");
15459                                 jump_group =
15460                                 ((const struct rte_flow_action_jump *)
15461                                                         act->conf)->group;
15462                                 if (mlx5_flow_group_to_table(dev, NULL,
15463                                                        jump_group,
15464                                                        &table,
15465                                                        &grp_info, &flow_err))
15466                                         return -rte_mtr_error_set(error,
15467                                         ENOTSUP,
15468                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15469                                         NULL, "cannot setup "
15470                                         "policy jump action");
15471                                 sub_policy->jump_tbl[i] =
15472                                 flow_dv_tbl_resource_get(dev,
15473                                         table, egress,
15474                                         transfer,
15475                                         !!dev_flow.external,
15476                                         NULL, jump_group, 0,
15477                                         0, &flow_err);
15478                                 if (!sub_policy->jump_tbl[i])
15479                                         return -rte_mtr_error_set(error,
15480                                         ENOTSUP,
15481                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15482                                         NULL,
15483                                         "cannot create jump action.");
15484                                 tbl_data = container_of
15485                                 (sub_policy->jump_tbl[i],
15486                                 struct mlx5_flow_tbl_data_entry, tbl);
15487                                 act_cnt->dr_jump_action[domain] =
15488                                         tbl_data->jump.action;
15489                                 act_cnt->fate_action =
15490                                         MLX5_FLOW_FATE_JUMP;
15491                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15492                                 break;
15493                         }
15494                         /*
15495                          * No need to check the meter hierarchy for yellow or red colors
15496                          * here since it is done in the validation stage.
15497                          */
15498                         case RTE_FLOW_ACTION_TYPE_METER:
15499                         {
15500                                 const struct rte_flow_action_meter *mtr;
15501                                 struct mlx5_flow_meter_info *next_fm;
15502                                 struct mlx5_flow_meter_policy *next_policy;
15503                                 struct rte_flow_action tag_action;
15504                                 struct mlx5_rte_flow_action_set_tag set_tag;
15505                                 uint32_t next_mtr_idx = 0;
15506
15507                                 mtr = act->conf;
15508                                 next_fm = mlx5_flow_meter_find(priv,
15509                                                         mtr->mtr_id,
15510                                                         &next_mtr_idx);
15511                                 if (!next_fm)
15512                                         return -rte_mtr_error_set(error, EINVAL,
15513                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15514                                                 "Fail to find next meter.");
15515                                 if (next_fm->def_policy)
15516                                         return -rte_mtr_error_set(error, EINVAL,
15517                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15518                                 "Hierarchy only supports termination meter.");
15519                                 next_policy = mlx5_flow_meter_policy_find(dev,
15520                                                 next_fm->policy_id, NULL);
15521                                 MLX5_ASSERT(next_policy);
15522                                 if (next_fm->drop_cnt) {
15523                                         set_tag.id =
15524                                                 (enum modify_reg)
15525                                                 mlx5_flow_get_reg_id(dev,
15526                                                 MLX5_MTR_ID,
15527                                                 0,
15528                                                 (struct rte_flow_error *)error);
15529                                         set_tag.offset = (priv->mtr_reg_share ?
15530                                                 MLX5_MTR_COLOR_BITS : 0);
15531                                         set_tag.length = (priv->mtr_reg_share ?
15532                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15533                                                MLX5_REG_BITS);
15534                                         set_tag.data = next_mtr_idx;
15535                                         tag_action.type =
15536                                                 (enum rte_flow_action_type)
15537                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15538                                         tag_action.conf = &set_tag;
15539                                         if (flow_dv_convert_action_set_reg
15540                                                 (mhdr_res, &tag_action,
15541                                                 (struct rte_flow_error *)error))
15542                                                 return -rte_errno;
15543                                         action_flags |=
15544                                                 MLX5_FLOW_ACTION_SET_TAG;
15545                                 }
15546                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15547                                 act_cnt->next_mtr_id = next_fm->meter_id;
15548                                 act_cnt->next_sub_policy = NULL;
15549                                 mtr_policy->is_hierarchy = 1;
15550                                 mtr_policy->dev = next_policy->dev;
15551                                 action_flags |=
15552                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15553                                 break;
15554                         }
15555                         default:
15556                                 return -rte_mtr_error_set(error, ENOTSUP,
15557                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15558                                           NULL, "action type not supported");
15559                         }
15560                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15561                                 /* Create the modify header action if needed. */
15562                                 dev_flow.dv.group = 1;
15563                                 if (flow_dv_modify_hdr_resource_register
15564                                         (dev, mhdr_res, &dev_flow, &flow_err))
15565                                         return -rte_mtr_error_set(error,
15566                                                 ENOTSUP,
15567                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15568                                                 NULL, "cannot register policy "
15569                                                 "set tag action");
15570                                 act_cnt->modify_hdr =
15571                                         dev_flow.handle->dvh.modify_hdr;
15572                         }
15573                 }
15574         }
15575         return 0;
15576 }
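
/*
 * Usage sketch (illustrative only, not part of the driver): the
 * actions[RTE_COLORS] array consumed above is built by the application
 * and reaches this function through rte_mtr_meter_policy_add(). The
 * variables port_id and policy_id, as well as the queue index, are
 * made-up example values.
 *
 * @code
 * struct rte_flow_action_queue green_queue = { .index = 0 };
 * struct rte_flow_action green_acts[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &green_queue },
 *         { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
 * };
 * struct rte_flow_action red_acts[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_DROP, .conf = NULL },
 *         { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
 * };
 * struct rte_mtr_meter_policy_params params = {
 *         .actions[RTE_COLOR_GREEN] = green_acts,
 *         .actions[RTE_COLOR_YELLOW] = NULL,
 *         .actions[RTE_COLOR_RED] = red_acts,
 * };
 * struct rte_mtr_error mtr_err;
 *
 * rte_mtr_meter_policy_add(port_id, policy_id, &params, &mtr_err);
 * @endcode
 */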
15577
15578 /**
15579  * Create the policy actions for each domain that has sub policies,
15580  * lock free (mutex should be acquired by the caller).
15581  * Dispatches to the per domain creation routine.
15582  *
15583  * @param[in] dev
15584  *   Pointer to the Ethernet device structure.
15585  * @param[in] mtr_policy
15586  *   Meter policy struct.
15587  * @param[in] actions
15588  *   Array of action lists, one list per color, used to create the actions.
15589  * @param[out] error
15590  *   Perform verbose error reporting if not NULL. Initialized in case of
15591  *   error only.
15592  *
15593  * @return
15594  *   0 on success, otherwise negative errno value.
15595  */
15596 static int
15597 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15598                       struct mlx5_flow_meter_policy *mtr_policy,
15599                       const struct rte_flow_action *actions[RTE_COLORS],
15600                       struct rte_mtr_error *error)
15601 {
15602         int ret, i;
15603         uint16_t sub_policy_num;
15604
15605         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15606                 sub_policy_num = (mtr_policy->sub_policy_num >>
15607                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15608                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15609                 if (sub_policy_num) {
15610                         ret = __flow_dv_create_domain_policy_acts(dev,
15611                                 mtr_policy, actions,
15612                                 (enum mlx5_meter_domain)i, error);
15613                         /* Resource cleanup is done at the caller level. */
15614                         if (ret)
15615                                 return ret;
15616                 }
15617         }
15618         return 0;
15619 }
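
/*
 * Note: mtr_policy->sub_policy_num packs one small counter per meter
 * domain, which is why the loop above shifts and masks the word per
 * domain index. A decoding sketch using only the driver's own macros:
 *
 * @code
 * uint16_t num[MLX5_MTR_DOMAIN_MAX];
 * int d;
 *
 * for (d = 0; d < MLX5_MTR_DOMAIN_MAX; d++)
 *         num[d] = (mtr_policy->sub_policy_num >>
 *                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * d)) &
 *                  MLX5_MTR_SUB_POLICY_NUM_MASK;
 * @endcode
 */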
15620
15621 /**
15622  * Query a DV flow rule for its statistics via DevX.
15623  *
15624  * @param[in] dev
15625  *   Pointer to Ethernet device.
15626  * @param[in] cnt_idx
15627  *   Index to the flow counter.
15628  * @param[out] data
15629  *   Data retrieved by the query.
15630  * @param[out] error
15631  *   Perform verbose error reporting if not NULL.
15632  *
15633  * @return
15634  *   0 on success, a negative errno value otherwise and rte_errno is set.
15635  */
15636 static int
15637 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15638                     struct rte_flow_error *error)
15639 {
15640         struct mlx5_priv *priv = dev->data->dev_private;
15641         struct rte_flow_query_count *qc = data;
15642
15643         if (!priv->config.devx)
15644                 return rte_flow_error_set(error, ENOTSUP,
15645                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15646                                           NULL,
15647                                           "counters are not supported");
15648         if (cnt_idx) {
15649                 uint64_t pkts, bytes;
15650                 struct mlx5_flow_counter *cnt;
15651                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15652
15653                 if (err)
15654                         return rte_flow_error_set(error, -err,
15655                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15656                                         NULL, "cannot read counters");
15657                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15658                 qc->hits_set = 1;
15659                 qc->bytes_set = 1;
15660                 qc->hits = pkts - cnt->hits;
15661                 qc->bytes = bytes - cnt->bytes;
15662                 if (qc->reset) {
15663                         cnt->hits = pkts;
15664                         cnt->bytes = bytes;
15665                 }
15666                 return 0;
15667         }
15668         return rte_flow_error_set(error, EINVAL,
15669                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15670                                   NULL,
15671                                   "counters are not available");
15672 }
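
/*
 * Usage sketch (illustrative only): this handler is normally reached
 * through the generic rte_flow_query() API with a COUNT action; port_id
 * and flow are assumed to identify an existing flow rule with a counter.
 * Setting qc.reset asks the driver to restart the delta from the current
 * hardware values, as implemented above.
 *
 * @code
 * struct rte_flow_query_count qc = { .reset = 1 };
 * struct rte_flow_action count_action = {
 *         .type = RTE_FLOW_ACTION_TYPE_COUNT,
 * };
 * struct rte_flow_error err;
 *
 * if (rte_flow_query(port_id, flow, &count_action, &qc, &err) == 0 &&
 *     qc.hits_set && qc.bytes_set)
 *         printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", qc.hits, qc.bytes);
 * @endcode
 */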
15673
15674 static int
15675 flow_dv_action_query(struct rte_eth_dev *dev,
15676                      const struct rte_flow_action_handle *handle, void *data,
15677                      struct rte_flow_error *error)
15678 {
15679         struct mlx5_age_param *age_param;
15680         struct rte_flow_query_age *resp;
15681         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15682         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15683         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15684         struct mlx5_priv *priv = dev->data->dev_private;
15685         struct mlx5_aso_ct_action *ct;
15686         uint16_t owner;
15687         uint32_t dev_idx;
15688
15689         switch (type) {
15690         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15691                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15692                 resp = data;
15693                 resp->aged = __atomic_load_n(&age_param->state,
15694                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15695                                                                           1 : 0;
15696                 resp->sec_since_last_hit_valid = !resp->aged;
15697                 if (resp->sec_since_last_hit_valid)
15698                         resp->sec_since_last_hit = __atomic_load_n
15699                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15700                 return 0;
15701         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15702                 return flow_dv_query_count(dev, idx, data, error);
15703         case MLX5_INDIRECT_ACTION_TYPE_CT:
15704                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15705                 if (owner != PORT_ID(priv))
15706                         return rte_flow_error_set(error, EACCES,
15707                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15708                                         NULL,
15709                                         "CT object owned by another port");
15710                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15711                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15712                 MLX5_ASSERT(ct);
15713                 if (!ct->refcnt)
15714                         return rte_flow_error_set(error, EFAULT,
15715                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15716                                         NULL,
15717                                         "CT object is inactive");
15718                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15719                                                         ct->peer;
15720                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15721                                                         ct->is_original;
15722                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15723                         return rte_flow_error_set(error, EIO,
15724                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15725                                         NULL,
15726                                         "Failed to query CT context");
15727                 return 0;
15728         default:
15729                 return rte_flow_error_set(error, ENOTSUP,
15730                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15731                                           "action type query not supported");
15732         }
15733 }
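
/*
 * Usage sketch (illustrative only): indirect actions reach this handler
 * through rte_flow_action_handle_query(). The output structure depends
 * on the action behind the handle: struct rte_flow_query_age for AGE,
 * struct rte_flow_query_count for COUNT and struct
 * rte_flow_action_conntrack for CT. The age_handle variable below is an
 * assumed, previously created indirect AGE action handle.
 *
 * @code
 * struct rte_flow_query_age age = { 0 };
 * struct rte_flow_error err;
 *
 * if (rte_flow_action_handle_query(port_id, age_handle, &age, &err) == 0 &&
 *     age.sec_since_last_hit_valid)
 *         printf("idle for %u seconds\n", age.sec_since_last_hit);
 * @endcode
 */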
15734
15735 /**
15736  * Query a flow rule AGE action for aging information.
15737  *
15738  * @param[in] dev
15739  *   Pointer to Ethernet device.
15740  * @param[in] flow
15741  *   Pointer to the flow rule.
15742  * @param[out] data
15743  *   Data retrieved by the query.
15744  * @param[out] error
15745  *   Perform verbose error reporting if not NULL.
15746  *
15747  * @return
15748  *   0 on success, a negative errno value otherwise and rte_errno is set.
15749  */
15750 static int
15751 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15752                   void *data, struct rte_flow_error *error)
15753 {
15754         struct rte_flow_query_age *resp = data;
15755         struct mlx5_age_param *age_param;
15756
15757         if (flow->age) {
15758                 struct mlx5_aso_age_action *act =
15759                                      flow_aso_age_get_by_idx(dev, flow->age);
15760
15761                 age_param = &act->age_params;
15762         } else if (flow->counter) {
15763                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15764
15765                 if (!age_param || !age_param->timeout)
15766                         return rte_flow_error_set
15767                                         (error, EINVAL,
15768                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15769                                          NULL, "cannot read age data");
15770         } else {
15771                 return rte_flow_error_set(error, EINVAL,
15772                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15773                                           NULL, "age data not available");
15774         }
15775         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15776                                      AGE_TMOUT ? 1 : 0;
15777         resp->sec_since_last_hit_valid = !resp->aged;
15778         if (resp->sec_since_last_hit_valid)
15779                 resp->sec_since_last_hit = __atomic_load_n
15780                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15781         return 0;
15782 }
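
/*
 * Usage sketch (illustrative only): the aging state of a flow rule with
 * an AGE action is read through rte_flow_query(); port_id and flow are
 * assumed to exist.
 *
 * @code
 * struct rte_flow_query_age age = { 0 };
 * struct rte_flow_action age_action = { .type = RTE_FLOW_ACTION_TYPE_AGE };
 * struct rte_flow_error err;
 *
 * if (rte_flow_query(port_id, flow, &age_action, &age, &err) == 0) {
 *         if (age.aged)
 *                 printf("flow aged out\n");
 *         else if (age.sec_since_last_hit_valid)
 *                 printf("flow idle for %u seconds\n",
 *                        age.sec_since_last_hit);
 * }
 * @endcode
 */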
15783
15784 /**
15785  * Query a flow.
15786  *
15787  * @see rte_flow_query()
15788  * @see rte_flow_ops
15789  */
15790 static int
15791 flow_dv_query(struct rte_eth_dev *dev,
15792               struct rte_flow *flow,
15793               const struct rte_flow_action *actions,
15794               void *data,
15795               struct rte_flow_error *error)
15796 {
15797         int ret = -EINVAL;
15798
15799         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15800                 switch (actions->type) {
15801                 case RTE_FLOW_ACTION_TYPE_VOID:
15802                         break;
15803                 case RTE_FLOW_ACTION_TYPE_COUNT:
15804                         ret = flow_dv_query_count(dev, flow->counter, data,
15805                                                   error);
15806                         break;
15807                 case RTE_FLOW_ACTION_TYPE_AGE:
15808                         ret = flow_dv_query_age(dev, flow, data, error);
15809                         break;
15810                 default:
15811                         return rte_flow_error_set(error, ENOTSUP,
15812                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15813                                                   actions,
15814                                                   "action not supported");
15815                 }
15816         }
15817         return ret;
15818 }
15819
15820 /**
15821  * Destroy the meter table set.
15822  * Lock free (mutex should be acquired by the caller).
15823  *
15824  * @param[in] dev
15825  *   Pointer to Ethernet device.
15826  * @param[in] fm
15827  *   Meter information table.
15828  */
15829 static void
15830 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15831                         struct mlx5_flow_meter_info *fm)
15832 {
15833         struct mlx5_priv *priv = dev->data->dev_private;
15834         int i;
15835
15836         if (!fm || !priv->config.dv_flow_en)
15837                 return;
15838         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15839                 if (fm->drop_rule[i]) {
15840                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15841                         fm->drop_rule[i] = NULL;
15842                 }
15843         }
15844 }
15845
15846 static void
15847 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15848 {
15849         struct mlx5_priv *priv = dev->data->dev_private;
15850         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15851         struct mlx5_flow_tbl_data_entry *tbl;
15852         int i, j;
15853
15854         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15855                 if (mtrmng->def_rule[i]) {
15856                         claim_zero(mlx5_flow_os_destroy_flow
15857                                         (mtrmng->def_rule[i]));
15858                         mtrmng->def_rule[i] = NULL;
15859                 }
15860                 if (mtrmng->def_matcher[i]) {
15861                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
15862                                 struct mlx5_flow_tbl_data_entry, tbl);
15863                         mlx5_list_unregister(tbl->matchers,
15864                                              &mtrmng->def_matcher[i]->entry);
15865                         mtrmng->def_matcher[i] = NULL;
15866                 }
15867                 for (j = 0; j < MLX5_REG_BITS; j++) {
15868                         if (mtrmng->drop_matcher[i][j]) {
15869                                 tbl =
15870                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
15871                                              struct mlx5_flow_tbl_data_entry,
15872                                              tbl);
15873                                 mlx5_list_unregister(tbl->matchers,
15874                                             &mtrmng->drop_matcher[i][j]->entry);
15875                                 mtrmng->drop_matcher[i][j] = NULL;
15876                         }
15877                 }
15878                 if (mtrmng->drop_tbl[i]) {
15879                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15880                                 mtrmng->drop_tbl[i]);
15881                         mtrmng->drop_tbl[i] = NULL;
15882                 }
15883         }
15884 }
15885
15886 /* Number of meter flow actions: count and jump, or count and drop. */
15887 #define METER_ACTIONS 2
15888
15889 static void
15890 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15891                                     enum mlx5_meter_domain domain)
15892 {
15893         struct mlx5_priv *priv = dev->data->dev_private;
15894         struct mlx5_flow_meter_def_policy *def_policy =
15895                         priv->sh->mtrmng->def_policy[domain];
15896
15897         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15898         mlx5_free(def_policy);
15899         priv->sh->mtrmng->def_policy[domain] = NULL;
15900 }
15901
15902 /**
15903  * Destroy the default policy table set.
15904  *
15905  * @param[in] dev
15906  *   Pointer to Ethernet device.
15907  */
15908 static void
15909 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15910 {
15911         struct mlx5_priv *priv = dev->data->dev_private;
15912         int i;
15913
15914         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15915                 if (priv->sh->mtrmng->def_policy[i])
15916                         __flow_dv_destroy_domain_def_policy(dev,
15917                                         (enum mlx5_meter_domain)i);
15918         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15919 }
15920
15921 static int
15922 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
15923                         uint32_t color_reg_c_idx,
15924                         enum rte_color color, void *matcher_object,
15925                         int actions_n, void *actions,
15926                         bool match_src_port, const struct rte_flow_item *item,
15927                         void **rule, const struct rte_flow_attr *attr)
15928 {
15929         int ret;
15930         struct mlx5_flow_dv_match_params value = {
15931                 .size = sizeof(value.buf),
15932         };
15933         struct mlx5_flow_dv_match_params matcher = {
15934                 .size = sizeof(matcher.buf),
15935         };
15936         struct mlx5_priv *priv = dev->data->dev_private;
15937         uint8_t misc_mask;
15938
15939         if (match_src_port && (priv->representor || priv->master)) {
15940                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
15941                                                    value.buf, item, attr)) {
15942                         DRV_LOG(ERR, "Failed to create meter policy%d flow's"
15943                                 " value with port.", color);
15944                         return -1;
15945                 }
15946         }
15947         flow_dv_match_meta_reg(matcher.buf, value.buf,
15948                                (enum modify_reg)color_reg_c_idx,
15949                                rte_col_2_mlx5_col(color), UINT32_MAX);
15950         misc_mask = flow_dv_matcher_enable(value.buf);
15951         __flow_dv_adjust_buf_size(&value.size, misc_mask);
15952         ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
15953                                        actions_n, actions, rule);
15954         if (ret) {
15955                 DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
15956                 return -1;
15957         }
15958         return 0;
15959 }
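
/*
 * Note: the policy flow created above matches on the meter color that
 * the meter object has written into the REG_C register selected for
 * MLX5_MTR_COLOR; rte_col_2_mlx5_col() converts the rte_color value to
 * the device encoding used as the match value.
 */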
15960
15961 static int
15962 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
15963                         uint32_t color_reg_c_idx,
15964                         uint16_t priority,
15965                         struct mlx5_flow_meter_sub_policy *sub_policy,
15966                         const struct rte_flow_attr *attr,
15967                         bool match_src_port,
15968                         const struct rte_flow_item *item,
15969                         struct mlx5_flow_dv_matcher **policy_matcher,
15970                         struct rte_flow_error *error)
15971 {
15972         struct mlx5_list_entry *entry;
15973         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
15974         struct mlx5_flow_dv_matcher matcher = {
15975                 .mask = {
15976                         .size = sizeof(matcher.mask.buf),
15977                 },
15978                 .tbl = tbl_rsc,
15979         };
15980         struct mlx5_flow_dv_match_params value = {
15981                 .size = sizeof(value.buf),
15982         };
15983         struct mlx5_flow_cb_ctx ctx = {
15984                 .error = error,
15985                 .data = &matcher,
15986         };
15987         struct mlx5_flow_tbl_data_entry *tbl_data;
15988         struct mlx5_priv *priv = dev->data->dev_private;
15989         const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
15990
15991         if (match_src_port && (priv->representor || priv->master)) {
15992                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
15993                                                    value.buf, item, attr)) {
15994                         DRV_LOG(ERR, "Failed to register meter policy%d matcher"
15995                                 " with port.", priority);
15996                         return -1;
15997                 }
15998         }
15999         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
16000         if (priority < RTE_COLOR_RED)
16001                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16002                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
16003         matcher.priority = priority;
16004         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
16005                                     matcher.mask.size);
16006         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16007         if (!entry) {
16008                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
16009                 return -1;
16010         }
16011         *policy_matcher =
16012                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
16013         return 0;
16014 }
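
/*
 * Note: policy matchers are kept in the per table matchers list, so
 * mlx5_list_register() above either reuses a reference counted matcher
 * entry with the same mask and priority or creates a new one through
 * the list callbacks.
 */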
16015
16016 /**
16017  * Create the policy rules per domain.
16018  *
16019  * @param[in] dev
16020  *   Pointer to Ethernet device.
16021  * @param[in] sub_policy
16022  *   Pointer to the sub policy table.
16023  * @param[in] egress
16024  *   Direction of the table.
16025  * @param[in] transfer
16026  *   E-Switch or NIC flow.
16027  * @param[in] match_src_port  Match the source port or not.
16028  * @param[in] acts  Pointer to the policy action list per color.
16029  *
16030  * @return
16031  *   0 on success, -1 otherwise.
16032  */
16033 static int
16034 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16035                 struct mlx5_flow_meter_sub_policy *sub_policy,
16036                 uint8_t egress, uint8_t transfer, bool match_src_port,
16037                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
16038 {
16039         struct mlx5_priv *priv = dev->data->dev_private;
16040         struct rte_flow_error flow_err;
16041         uint32_t color_reg_c_idx;
16042         struct rte_flow_attr attr = {
16043                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16044                 .priority = 0,
16045                 .ingress = 0,
16046                 .egress = !!egress,
16047                 .transfer = !!transfer,
16048                 .reserved = 0,
16049         };
16050         int i;
16051         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16052         struct mlx5_sub_policy_color_rule *color_rule;
16053         bool svport_match;
16054         struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
16055
16056         if (ret < 0)
16057                 return -1;
16058         /* Create policy table with POLICY level. */
16059         if (!sub_policy->tbl_rsc)
16060                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16061                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
16062                                 egress, transfer, false, NULL, 0, 0,
16063                                 sub_policy->idx, &flow_err);
16064         if (!sub_policy->tbl_rsc) {
16065                 DRV_LOG(ERR,
16066                         "Failed to create meter sub policy table.");
16067                 return -1;
16068         }
16069         /* Prepare matchers. */
16070         color_reg_c_idx = ret;
16071         for (i = 0; i < RTE_COLORS; i++) {
16072                 TAILQ_INIT(&sub_policy->color_rules[i]);
16073                 if (!acts[i].actions_n)
16074                         continue;
16075                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16076                                 sizeof(struct mlx5_sub_policy_color_rule),
16077                                 0, SOCKET_ID_ANY);
16078                 if (!color_rule) {
16079                         DRV_LOG(ERR, "No memory to create color rule.");
16080                         goto err_exit;
16081                 }
16082                 tmp_rules[i] = color_rule;
16083                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16084                                   color_rule, next_port);
16085                 color_rule->src_port = priv->representor_id;
16086                 /* The priority set below is not actually used. */
16087                 attr.priority = i;
16088                 /* Create matchers for colors. */
16089                 svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
16090                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16091                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16092                                 &attr, svport_match, NULL,
16093                                 &color_rule->matcher, &flow_err)) {
16094                         DRV_LOG(ERR, "Failed to create color%u matcher.", i);
16095                         goto err_exit;
16096                 }
16097                 /* Create flow, matching color. */
16098                 if (__flow_dv_create_policy_flow(dev,
16099                                 color_reg_c_idx, (enum rte_color)i,
16100                                 color_rule->matcher->matcher_object,
16101                                 acts[i].actions_n, acts[i].dv_actions,
16102                                 svport_match, NULL, &color_rule->rule,
16103                                 &attr)) {
16104                         DRV_LOG(ERR, "Failed to create color%u rule.", i);
16105                         goto err_exit;
16106                 }
16107         }
16108         return 0;
16109 err_exit:
16110         /* Roll back: clear all the policy rules created so far. */
16111         do {
16112                 color_rule = tmp_rules[i];
16113                 if (color_rule) {
16114                         if (color_rule->rule)
16115                                 mlx5_flow_os_destroy_flow(color_rule->rule);
16116                         if (color_rule->matcher) {
16117                                 struct mlx5_flow_tbl_data_entry *tbl =
16118                                         container_of(color_rule->matcher->tbl,
16119                                                      typeof(*tbl), tbl);
16120                                 mlx5_list_unregister(tbl->matchers,
16121                                                 &color_rule->matcher->entry);
16122                         }
16123                         TAILQ_REMOVE(&sub_policy->color_rules[i],
16124                                      color_rule, next_port);
16125                         mlx5_free(color_rule);
16126                 }
16127         } while (i--);
16128         return -1;
16129 }
16130
16131 static int
16132 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16133                         struct mlx5_flow_meter_policy *mtr_policy,
16134                         struct mlx5_flow_meter_sub_policy *sub_policy,
16135                         uint32_t domain)
16136 {
16137         struct mlx5_priv *priv = dev->data->dev_private;
16138         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16139         struct mlx5_flow_dv_tag_resource *tag;
16140         struct mlx5_flow_dv_port_id_action_resource *port_action;
16141         struct mlx5_hrxq *hrxq;
16142         struct mlx5_flow_meter_info *next_fm = NULL;
16143         struct mlx5_flow_meter_policy *next_policy;
16144         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16145         struct mlx5_flow_tbl_data_entry *tbl_data;
16146         struct rte_flow_error error;
16147         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16148         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16149         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16150         bool match_src_port = false;
16151         int i;
16152
16153         for (i = 0; i < RTE_COLORS; i++) {
16154                 acts[i].actions_n = 0;
16155                 if (i == RTE_COLOR_RED) {
16156                         /* Only drop is supported for red. */
16157                         acts[i].dv_actions[0] =
16158                                 mtr_policy->dr_drop_action[domain];
16159                         acts[i].actions_n = 1;
16160                         continue;
16161                 }
16162                 if (i == RTE_COLOR_GREEN &&
16163                     mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16164                         struct rte_flow_attr attr = {
16165                                 .transfer = transfer
16166                         };
16167
16168                         next_fm = mlx5_flow_meter_find(priv,
16169                                         mtr_policy->act_cnt[i].next_mtr_id,
16170                                         NULL);
16171                         if (!next_fm) {
16172                                 DRV_LOG(ERR,
16173                                         "Failed to get next hierarchy meter.");
16174                                 goto err_exit;
16175                         }
16176                         if (mlx5_flow_meter_attach(priv, next_fm,
16177                                                    &attr, &error)) {
16178                                 DRV_LOG(ERR, "%s", error.message);
16179                                 next_fm = NULL;
16180                                 goto err_exit;
16181                         }
16182                         /* Meter action must be the first for TX. */
16183                         if (mtr_first) {
16184                                 acts[i].dv_actions[acts[i].actions_n] =
16185                                         next_fm->meter_action;
16186                                 acts[i].actions_n++;
16187                         }
16188                 }
16189                 if (mtr_policy->act_cnt[i].rix_mark) {
16190                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16191                                         mtr_policy->act_cnt[i].rix_mark);
16192                         if (!tag) {
16193                                 DRV_LOG(ERR, "Failed to find "
16194                                 "mark action for policy.");
16195                                 goto err_exit;
16196                         }
16197                         acts[i].dv_actions[acts[i].actions_n] = tag->action;
16198                         acts[i].actions_n++;
16199                 }
16200                 if (mtr_policy->act_cnt[i].modify_hdr) {
16201                         acts[i].dv_actions[acts[i].actions_n] =
16202                                 mtr_policy->act_cnt[i].modify_hdr->action;
16203                         acts[i].actions_n++;
16204                 }
16205                 if (mtr_policy->act_cnt[i].fate_action) {
16206                         switch (mtr_policy->act_cnt[i].fate_action) {
16207                         case MLX5_FLOW_FATE_PORT_ID:
16208                                 port_action = mlx5_ipool_get
16209                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16210                                 mtr_policy->act_cnt[i].rix_port_id_action);
16211                                 if (!port_action) {
16212                                         DRV_LOG(ERR, "Failed to find "
16213                                                 "port action for policy.");
16214                                         goto err_exit;
16215                                 }
16216                                 acts[i].dv_actions[acts[i].actions_n] =
16217                                         port_action->action;
16218                                 acts[i].actions_n++;
16219                                 mtr_policy->dev = dev;
16220                                 match_src_port = true;
16221                                 break;
16222                         case MLX5_FLOW_FATE_DROP:
16223                         case MLX5_FLOW_FATE_JUMP:
16224                                 acts[i].dv_actions[acts[i].actions_n] =
16225                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
16226                                 acts[i].actions_n++;
16227                                 break;
16228                         case MLX5_FLOW_FATE_SHARED_RSS:
16229                         case MLX5_FLOW_FATE_QUEUE:
16230                                 hrxq = mlx5_ipool_get
16231                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16232                                          sub_policy->rix_hrxq[i]);
16233                                 if (!hrxq) {
16234                                         DRV_LOG(ERR, "Failed to find "
16235                                                 "queue action for policy.");
16236                                         goto err_exit;
16237                                 }
16238                                 acts[i].dv_actions[acts[i].actions_n] =
16239                                         hrxq->action;
16240                                 acts[i].actions_n++;
16241                                 break;
16242                         case MLX5_FLOW_FATE_MTR:
16243                                 if (!next_fm) {
16244                                         DRV_LOG(ERR,
16245                                                 "No next hierarchy meter.");
16246                                         goto err_exit;
16247                                 }
16248                                 if (!mtr_first) {
16249                                         acts[i].dv_actions[acts[i].actions_n] =
16250                                                         next_fm->meter_action;
16251                                         acts[i].actions_n++;
16252                                 }
16253                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
16254                                         next_sub_policy =
16255                                         mtr_policy->act_cnt[i].next_sub_policy;
16256                                 } else {
16257                                         next_policy =
16258                                                 mlx5_flow_meter_policy_find(dev,
16259                                                 next_fm->policy_id, NULL);
16260                                         MLX5_ASSERT(next_policy);
16261                                         next_sub_policy =
16262                                         next_policy->sub_policys[domain][0];
16263                                 }
16264                                 tbl_data =
16265                                         container_of(next_sub_policy->tbl_rsc,
16266                                         struct mlx5_flow_tbl_data_entry, tbl);
16267                                 acts[i].dv_actions[acts[i].actions_n++] =
16268                                                         tbl_data->jump.action;
16269                                 if (mtr_policy->act_cnt[i].modify_hdr)
16270                                         match_src_port = !!transfer;
16271                                 break;
16272                         default:
16273                                 /* Other fate actions need nothing here. */
16274                                 break;
16275                         }
16276                 }
16277         }
16278         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16279                                 egress, transfer, match_src_port, acts)) {
16280                 DRV_LOG(ERR,
16281                         "Failed to create policy rules per domain.");
16282                 goto err_exit;
16283         }
16284         return 0;
16285 err_exit:
16286         if (next_fm)
16287                 mlx5_flow_meter_detach(priv, next_fm);
16288         return -1;
16289 }
16290
16291 /**
16292  * Create the policy rules.
16293  *
16294  * @param[in] dev
16295  *   Pointer to Ethernet device.
16296  * @param[in,out] mtr_policy
16297  *   Pointer to meter policy table.
16298  *
16299  * @return
16300  *   0 on success, -1 otherwise.
16301  */
16302 static int
16303 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16304                              struct mlx5_flow_meter_policy *mtr_policy)
16305 {
16306         int i;
16307         uint16_t sub_policy_num;
16308
16309         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16310                 sub_policy_num = (mtr_policy->sub_policy_num >>
16311                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16312                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16313                 if (!sub_policy_num)
16314                         continue;
16315                 /* Prepare actions list and create policy rules. */
16316                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16317                         mtr_policy->sub_policys[i][0], i)) {
16318                         DRV_LOG(ERR, "Failed to create policy action "
16319                                 "list per domain.");
16320                         return -1;
16321                 }
16322         }
16323         return 0;
16324 }
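      /*
       * Note on the sub_policy_num encoding used above and elsewhere in
       * this file: the 32-bit mtr_policy->sub_policy_num packs one small
       * counter per meter domain, each MLX5_MTR_SUB_POLICY_NUM_SHIFT bits
       * wide and extracted with MLX5_MTR_SUB_POLICY_NUM_MASK. A worked
       * example, assuming (for illustration only) SHIFT == 3 and
       * MASK == 0x7:
       *
       *   sub_policy_num = 0x0000000A        (binary ...000 001 010)
       *   ingress  (domain 0): (0xA >> (3 * 0)) & 0x7 = 2
       *   egress   (domain 1): (0xA >> (3 * 1)) & 0x7 = 1
       *   transfer (domain 2): (0xA >> (3 * 2)) & 0x7 = 0
       */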
16325
16326 static int
16327 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16328 {
16329         struct mlx5_priv *priv = dev->data->dev_private;
16330         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16331         struct mlx5_flow_meter_def_policy *def_policy;
16332         struct mlx5_flow_tbl_resource *jump_tbl;
16333         struct mlx5_flow_tbl_data_entry *tbl_data;
16334         uint8_t egress, transfer;
16335         struct rte_flow_error error;
16336         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16337         int ret;
16338
16339         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16340         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16341         def_policy = mtrmng->def_policy[domain];
16342         if (!def_policy) {
16343                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16344                         sizeof(struct mlx5_flow_meter_def_policy),
16345                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16346                 if (!def_policy) {
16347                         DRV_LOG(ERR, "Failed to alloc default policy table.");
16348                         goto def_policy_error;
16349                 }
16350                 mtrmng->def_policy[domain] = def_policy;
16351                 /* Create the meter suffix table with SUFFIX level. */
16352                 jump_tbl = flow_dv_tbl_resource_get(dev,
16353                                 MLX5_FLOW_TABLE_LEVEL_METER,
16354                                 egress, transfer, false, NULL, 0,
16355                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16356                 if (!jump_tbl) {
16357                         DRV_LOG(ERR,
16358                                 "Failed to create meter suffix table.");
16359                         goto def_policy_error;
16360                 }
16361                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16362                 tbl_data = container_of(jump_tbl,
16363                                         struct mlx5_flow_tbl_data_entry, tbl);
16364                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16365                                                 tbl_data->jump.action;
16366                 acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
16367                 acts[RTE_COLOR_GREEN].actions_n = 1;
16368                 /*
16369                  * YELLOW shares the default policy with GREEN: both use
16370                  * the same table and jump action. Getting the table
16371                  * resource a second time only increments its reference
16372                  * count, keeping the release stage balanced.
16373                  */
16374                 jump_tbl = flow_dv_tbl_resource_get(dev,
16375                                 MLX5_FLOW_TABLE_LEVEL_METER,
16376                                 egress, transfer, false, NULL, 0,
16377                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16378                 if (!jump_tbl) {
16379                         DRV_LOG(ERR,
16380                                 "Failed to get meter suffix table.");
16381                         goto def_policy_error;
16382                 }
16383                 def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
16384                 tbl_data = container_of(jump_tbl,
16385                                         struct mlx5_flow_tbl_data_entry, tbl);
16386                 def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
16387                                                 tbl_data->jump.action;
16388                 acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
16389                 acts[RTE_COLOR_YELLOW].actions_n = 1;
16390                 /* Create jump action to the drop table. */
16391                 if (!mtrmng->drop_tbl[domain]) {
16392                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16393                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16394                                  egress, transfer, false, NULL, 0,
16395                                  0, MLX5_MTR_TABLE_ID_DROP, &error);
16396                         if (!mtrmng->drop_tbl[domain]) {
16397                                 DRV_LOG(ERR, "Failed to create meter "
16398                                         "drop table for default policy.");
16399                                 goto def_policy_error;
16400                         }
16401                 }
16402                 /* All RED traffic jumps to the single shared drop table. */
16403                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16404                                         struct mlx5_flow_tbl_data_entry, tbl);
16405                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16406                                                 tbl_data->jump.action;
16407                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16408                 acts[RTE_COLOR_RED].actions_n = 1;
16409                 /* Create default policy rules. */
16410                 ret = __flow_dv_create_domain_policy_rules(dev,
16411                                         &def_policy->sub_policy,
16412                                         egress, transfer, false, acts);
16413                 if (ret) {
16414                         DRV_LOG(ERR, "Failed to create default policy rules.");
16415                         goto def_policy_error;
16416                 }
16417         }
16418         return 0;
16419 def_policy_error:
16420         __flow_dv_destroy_domain_def_policy(dev,
16421                                             (enum mlx5_meter_domain)domain);
16422         return -1;
16423 }
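      /*
       * Summary of the default-policy layout created above, per domain:
       * GREEN and YELLOW jump to the shared meter suffix table (the second
       * table get only bumps the reference count), while RED jumps to the
       * meter drop table.
       */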
16424
16425 /**
16426  * Create the default policy table set.
16427  *
16428  * @param[in] dev
16429  *   Pointer to Ethernet device.
16430  * @return
16431  *   0 on success, -1 otherwise.
16432  */
16433 static int
16434 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16435 {
16436         struct mlx5_priv *priv = dev->data->dev_private;
16437         int i;
16438
16439         /* Non-termination policy table. */
16440         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16441                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16442                         continue;
16443                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16444                         DRV_LOG(ERR, "Failed to create default policy");
16445                         /* Rollback the created default policies for others. */
16446                         flow_dv_destroy_def_policy(dev);
16447                         return -1;
16448                 }
16449         }
16450         return 0;
16451 }
16452
16453 /**
16454  * Create the needed meter tables.
16455  * Lock-free; the caller is expected to hold the meter mutex.
16456  *
16457  * @param[in] dev
16458  *   Pointer to Ethernet device.
16459  * @param[in] fm
16460  *   Meter information table.
16461  * @param[in] mtr_idx
16462  *   Meter index.
16463  * @param[in] domain_bitmap
16464  *   Domain bitmap.
16465  * @return
16466  *   0 on success, -1 otherwise.
16467  */
16468 static int
16469 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16470                         struct mlx5_flow_meter_info *fm,
16471                         uint32_t mtr_idx,
16472                         uint8_t domain_bitmap)
16473 {
16474         struct mlx5_priv *priv = dev->data->dev_private;
16475         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16476         struct rte_flow_error error;
16477         struct mlx5_flow_tbl_data_entry *tbl_data;
16478         uint8_t egress, transfer;
16479         void *actions[METER_ACTIONS];
16480         int domain, ret, i;
16481         struct mlx5_flow_counter *cnt;
16482         struct mlx5_flow_dv_match_params value = {
16483                 .size = sizeof(value.buf),
16484         };
16485         struct mlx5_flow_dv_match_params matcher_para = {
16486                 .size = sizeof(matcher_para.buf),
16487         };
16488         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16489                                                      0, &error);
16490         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16491         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16492         struct mlx5_list_entry *entry;
16493         struct mlx5_flow_dv_matcher matcher = {
16494                 .mask = {
16495                         .size = sizeof(matcher.mask.buf),
16496                 },
16497         };
16498         struct mlx5_flow_dv_matcher *drop_matcher;
16499         struct mlx5_flow_cb_ctx ctx = {
16500                 .error = &error,
16501                 .data = &matcher,
16502         };
16503         uint8_t misc_mask;
16504
16505         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16506                 rte_errno = ENOTSUP;
16507                 return -1;
16508         }
16509         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
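                      /*
                       * Skip the domain if it already has the default drop
                       * rule and this meter does not need its own drop
                       * counter rule.
                       */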
16510                 if (!(domain_bitmap & (1 << domain)) ||
16511                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16512                         continue;
16513                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16514                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16515                 /* Create the drop table with METER DROP level. */
16516                 if (!mtrmng->drop_tbl[domain]) {
16517                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16518                                         MLX5_FLOW_TABLE_LEVEL_METER,
16519                                         egress, transfer, false, NULL, 0,
16520                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16521                         if (!mtrmng->drop_tbl[domain]) {
16522                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16523                                 goto policy_error;
16524                         }
16525                 }
16526                 /* Create default matcher in drop table. */
16527                 matcher.tbl = mtrmng->drop_tbl[domain];
16528                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16529                                 struct mlx5_flow_tbl_data_entry, tbl);
16530                 if (!mtrmng->def_matcher[domain]) {
16531                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16532                                        (enum modify_reg)mtr_id_reg_c,
16533                                        0, 0);
16534                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16535                         matcher.crc = rte_raw_cksum
16536                                         ((const void *)matcher.mask.buf,
16537                                         matcher.mask.size);
16538                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16539                         if (!entry) {
16540                                 DRV_LOG(ERR, "Failed to register meter "
16541                                         "drop default matcher.");
16542                                 goto policy_error;
16543                         }
16544                         mtrmng->def_matcher[domain] = container_of(entry,
16545                                         struct mlx5_flow_dv_matcher, entry);
16546                 }
16547                 /* Create default rule in drop table. */
16548                 if (!mtrmng->def_rule[domain]) {
16549                         i = 0;
16550                         actions[i++] = priv->sh->dr_drop_action;
16551                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16552                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16553                         misc_mask = flow_dv_matcher_enable(value.buf);
16554                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16555                         ret = mlx5_flow_os_create_flow
16556                                 (mtrmng->def_matcher[domain]->matcher_object,
16557                                 (void *)&value, i, actions,
16558                                 &mtrmng->def_rule[domain]);
16559                         if (ret) {
16560                                 DRV_LOG(ERR, "Failed to create meter "
16561                                         "default drop rule for drop table.");
16562                                 goto policy_error;
16563                         }
16564                 }
16565                 if (!fm->drop_cnt)
16566                         continue;
16567                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16568                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16569                         /* Create matchers for Drop. */
16570                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16571                                         (enum modify_reg)mtr_id_reg_c, 0,
16572                                         (mtr_id_mask << mtr_id_offset));
16573                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16574                         matcher.crc = rte_raw_cksum
16575                                         ((const void *)matcher.mask.buf,
16576                                         matcher.mask.size);
16577                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16578                         if (!entry) {
16579                                 DRV_LOG(ERR,
16580                                         "Failed to register meter drop matcher.");
16581                                 goto policy_error;
16582                         }
16583                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16584                                 container_of(entry, struct mlx5_flow_dv_matcher,
16585                                              entry);
16586                 }
16587                 drop_matcher =
16588                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16589                 /* Create drop rule, matching meter_id only. */
16590                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16591                                 (enum modify_reg)mtr_id_reg_c,
16592                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16593                 i = 0;
16594                 cnt = flow_dv_counter_get_by_idx(dev,
16595                                         fm->drop_cnt, NULL);
16596                 actions[i++] = cnt->action;
16597                 actions[i++] = priv->sh->dr_drop_action;
16598                 misc_mask = flow_dv_matcher_enable(value.buf);
16599                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16600                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16601                                                (void *)&value, i, actions,
16602                                                &fm->drop_rule[domain]);
16603                 if (ret) {
16604                         DRV_LOG(ERR, "Failed to create meter "
16605                                 "drop rule for drop table.");
16606                         goto policy_error;
16607                 }
16608         }
16609         return 0;
16610 policy_error:
16611         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16612                 if (fm->drop_rule[i]) {
16613                         claim_zero(mlx5_flow_os_destroy_flow
16614                                 (fm->drop_rule[i]));
16615                         fm->drop_rule[i] = NULL;
16616                 }
16617         }
16618         return -1;
16619 }
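      /*
       * Sketch of the meter register layout matched by the drop rules
       * above (derived from the code, not from PRM documentation): with
       * priv->mtr_reg_share set, the low MLX5_MTR_COLOR_BITS bits of the
       * register carry the color and the meter ID sits directly above
       * them, so a meter is matched as
       *
       *   reg_value == (mtr_idx << mtr_id_offset)
       *   reg_mask  == ((UINT32_C(1) << max_mtr_bits) - 1) << mtr_id_offset
       *
       * With an unshared register, mtr_id_offset is 0 and the meter ID
       * starts at bit 0.
       */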
16620
16621 static struct mlx5_flow_meter_sub_policy *
16622 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16623                 struct mlx5_flow_meter_policy *mtr_policy,
16624                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16625                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16626                 bool *is_reuse)
16627 {
16628         struct mlx5_priv *priv = dev->data->dev_private;
16629         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16630         uint32_t sub_policy_idx = 0;
16631         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16632         uint32_t i, j;
16633         struct mlx5_hrxq *hrxq;
16634         struct mlx5_flow_handle dh;
16635         struct mlx5_meter_policy_action_container *act_cnt;
16636         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16637         uint16_t sub_policy_num;
16638
16639         rte_spinlock_lock(&mtr_policy->sl);
16640         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16641                 if (!rss_desc[i])
16642                         continue;
16643                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16644                 if (!hrxq_idx[i]) {
16645                         rte_spinlock_unlock(&mtr_policy->sl);
16646                         return NULL;
16647                 }
16648         }
16649         sub_policy_num = (mtr_policy->sub_policy_num >>
16650                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16651                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16652         for (i = 0; i < sub_policy_num; i++) {
16654                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
16655                         if (rss_desc[j] &&
16656                             hrxq_idx[j] !=
16657                             mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
16658                                 break;
16659                 }
16660                 if (j >= MLX5_MTR_RTE_COLORS) {
16661                         /*
16662                          * Found a sub-policy table with the same
16663                          * queue per color.
16664                          */
16665                         rte_spinlock_unlock(&mtr_policy->sl);
16666                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
16667                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
16668                         *is_reuse = true;
16669                         return mtr_policy->sub_policys[domain][i];
16670                 }
16671         }
16672         /* Create sub policy. */
16673         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16674                 /* Reuse the first dummy sub_policy. */
16675                 sub_policy = mtr_policy->sub_policys[domain][0];
16676                 sub_policy_idx = sub_policy->idx;
16677         } else {
16678                 sub_policy = mlx5_ipool_zmalloc
16679                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16680                                 &sub_policy_idx);
16681                 if (!sub_policy ||
16682                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16683                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16684                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16685                         goto rss_sub_policy_error;
16686                 }
16687                 sub_policy->idx = sub_policy_idx;
16688                 sub_policy->main_policy = mtr_policy;
16689         }
16690         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16691                 if (!rss_desc[i])
16692                         continue;
16693                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16694                 if (mtr_policy->is_hierarchy) {
16695                         act_cnt = &mtr_policy->act_cnt[i];
16696                         act_cnt->next_sub_policy = next_sub_policy;
16697                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16698                 } else {
16699                         /*
16700                          * Overwrite the last action from
16701                          * RSS action to Queue action.
16702                          */
16703                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16704                                 hrxq_idx[i]);
16705                         if (!hrxq) {
16706                                 DRV_LOG(ERR, "Failed to find policy hrxq.");
16707                                 goto rss_sub_policy_error;
16708                         }
16709                         act_cnt = &mtr_policy->act_cnt[i];
16710                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16711                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16712                                 if (act_cnt->rix_mark)
16713                                         dh.mark = 1;
16714                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16715                                 dh.rix_hrxq = hrxq_idx[i];
16716                                 flow_drv_rxq_flags_set(dev, &dh);
16717                         }
16718                 }
16719         }
16720         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16721                 sub_policy, domain)) {
16722                 DRV_LOG(ERR, "Failed to create policy "
16723                         "rules per domain.");
16724                 goto rss_sub_policy_error;
16725         }
16726         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16727                 i = (mtr_policy->sub_policy_num >>
16728                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16729                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16730                 if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY)
16731                         goto rss_sub_policy_error;
16732                 mtr_policy->sub_policys[domain][i] = sub_policy;
16733                 i++;
16734                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16735                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16736                 mtr_policy->sub_policy_num |=
16737                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16738                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16739         }
16740         rte_spinlock_unlock(&mtr_policy->sl);
16741         *is_reuse = false;
16742         return sub_policy;
16743 rss_sub_policy_error:
16744         if (sub_policy) {
16745                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16746                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16747                         i = (mtr_policy->sub_policy_num >>
16748                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16749                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16750                         mtr_policy->sub_policys[domain][i] = NULL;
16751                         mlx5_ipool_free
16752                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16753                                         sub_policy->idx);
16754                 }
16755         }
16756         rte_spinlock_unlock(&mtr_policy->sl);
16757         return NULL;
16758 }
16759
16760 /**
16761  * Find or create the sub-policy table for an RSS flow steered through the prefix table.
16762  *
16763  * @param[in] dev
16764  *   Pointer to Ethernet device.
16765  * @param[in] mtr_policy
16766  *   Pointer to meter policy table.
16767  * @param[in] rss_desc
16768  *   Pointer to the RSS descriptors, one per color.
16769  * @return
16770  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
16771  */
16772 static struct mlx5_flow_meter_sub_policy *
16773 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
16774                 struct mlx5_flow_meter_policy *mtr_policy,
16775                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
16776 {
16777         struct mlx5_priv *priv = dev->data->dev_private;
16778         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16779         struct mlx5_flow_meter_info *next_fm;
16780         struct mlx5_flow_meter_policy *next_policy;
16781         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
16782         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
16783         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
16784         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16785         bool reuse_sub_policy;
16786         uint32_t i = 0;
16787         uint32_t j = 0;
16788
16789         while (true) {
16790                 /* Iterate hierarchy to get all policies in this hierarchy. */
16791                 policies[i++] = mtr_policy;
16792                 if (!mtr_policy->is_hierarchy)
16793                         break;
16794                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
16795                         DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
16796                         return NULL;
16797                 }
16798                 next_fm = mlx5_flow_meter_find(priv,
16799                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16800                 if (!next_fm) {
16801                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
16802                         return NULL;
16803                 }
16804                 next_policy =
16805                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
16806                                                     NULL);
16807                 MLX5_ASSERT(next_policy);
16808                 mtr_policy = next_policy;
16809         }
16810         while (i) {
16811                 /*
16812                  * From the last policy to the first one in the hierarchy,
16813                  * create/get the sub-policy for each of them.
16814                  */
16815                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
16816                                                         policies[--i],
16817                                                         rss_desc,
16818                                                         next_sub_policy,
16819                                                         &reuse_sub_policy);
16820                 if (!sub_policy) {
16821                         DRV_LOG(ERR, "Failed to get the sub policy.");
16822                         goto err_exit;
16823                 }
16824                 if (!reuse_sub_policy)
16825                         sub_policies[j++] = sub_policy;
16826                 next_sub_policy = sub_policy;
16827         }
16828         return sub_policy;
16829 err_exit:
16830         while (j) {
16831                 uint16_t sub_policy_num;
16832
16833                 sub_policy = sub_policies[--j];
16834                 mtr_policy = sub_policy->main_policy;
16835                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16836                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16837                         sub_policy_num = (mtr_policy->sub_policy_num >>
16838                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16839                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
16840                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
16841                                                                         NULL;
16842                         sub_policy_num--;
16843                         mtr_policy->sub_policy_num &=
16844                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16845                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16846                         mtr_policy->sub_policy_num |=
16847                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16848                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16849                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16850                                         sub_policy->idx);
16851                 }
16852         }
16853         return NULL;
16854 }
16855
16856 /**
16857  * Create the sub policy tag rule for all meters in hierarchy.
16858  *
16859  * @param[in] dev
16860  *   Pointer to Ethernet device.
16861  * @param[in] fm
16862  *   Meter information table.
16863  * @param[in] src_port
16864  *   The src port this extra rule should use.
16865  * @param[in] item
16866  *   The src port match item.
16867  * @param[out] error
16868  *   Perform verbose error reporting if not NULL.
16869  * @return
16870  *   0 on success, a negative errno value otherwise and rte_errno is set.
16871  */
16872 static int
16873 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
16874                                 struct mlx5_flow_meter_info *fm,
16875                                 int32_t src_port,
16876                                 const struct rte_flow_item *item,
16877                                 struct rte_flow_error *error)
16878 {
16879         struct mlx5_priv *priv = dev->data->dev_private;
16880         struct mlx5_flow_meter_policy *mtr_policy;
16881         struct mlx5_flow_meter_sub_policy *sub_policy;
16882         struct mlx5_flow_meter_info *next_fm = NULL;
16883         struct mlx5_flow_meter_policy *next_policy;
16884         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16885         struct mlx5_flow_tbl_data_entry *tbl_data;
16886         struct mlx5_sub_policy_color_rule *color_rule;
16887         struct mlx5_meter_policy_acts acts;
16888         uint32_t color_reg_c_idx;
16889         bool mtr_first = (src_port != UINT16_MAX);
16890         struct rte_flow_attr attr = {
16891                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16892                 .priority = 0,
16893                 .ingress = 0,
16894                 .egress = 0,
16895                 .transfer = 1,
16896                 .reserved = 0,
16897         };
16898         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
16899         int i;
16900
16901         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
16902         MLX5_ASSERT(mtr_policy);
16903         if (!mtr_policy->is_hierarchy)
16904                 return 0;
16905         next_fm = mlx5_flow_meter_find(priv,
16906                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16907         if (!next_fm) {
16908                 return rte_flow_error_set(error, EINVAL,
16909                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
16910                                 "Failed to find next meter in hierarchy.");
16911         }
16912         if (!next_fm->drop_cnt)
16913                 goto exit;
16914         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
16915         sub_policy = mtr_policy->sub_policys[domain][0];
16916         for (i = 0; i < RTE_COLORS; i++) {
16917                 bool rule_exist = false;
16918                 struct mlx5_meter_policy_action_container *act_cnt;
16919
16920                 if (i >= RTE_COLOR_YELLOW)
16921                         break;
16922                 TAILQ_FOREACH(color_rule,
16923                               &sub_policy->color_rules[i], next_port)
16924                         if (color_rule->src_port == src_port) {
16925                                 rule_exist = true;
16926                                 break;
16927                         }
16928                 if (rule_exist)
16929                         continue;
16930                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16931                                 sizeof(struct mlx5_sub_policy_color_rule),
16932                                 0, SOCKET_ID_ANY);
16933                 if (!color_rule)
16934                         return rte_flow_error_set(error, ENOMEM,
16935                                 RTE_FLOW_ERROR_TYPE_ACTION,
16936                                 NULL, "No memory to create tag color rule.");
16937                 color_rule->src_port = src_port;
16938                 attr.priority = i;
16939                 next_policy = mlx5_flow_meter_policy_find(dev,
16940                                                 next_fm->policy_id, NULL);
16941                 MLX5_ASSERT(next_policy);
16942                 next_sub_policy = next_policy->sub_policys[domain][0];
16943                 tbl_data = container_of(next_sub_policy->tbl_rsc,
16944                                         struct mlx5_flow_tbl_data_entry, tbl);
16945                 act_cnt = &mtr_policy->act_cnt[i];
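                      /*
                       * Ordering note: with a source-port match (mtr_first)
                       * the next meter action runs before the color modify
                       * header, otherwise the modify header is applied first.
                       */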
16946                 if (mtr_first) {
16947                         acts.dv_actions[0] = next_fm->meter_action;
16948                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
16949                 } else {
16950                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
16951                         acts.dv_actions[1] = next_fm->meter_action;
16952                 }
16953                 acts.dv_actions[2] = tbl_data->jump.action;
16954                 acts.actions_n = 3;
16955                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
16956                         next_fm = NULL;
16957                         goto err_exit;
16958                 }
16959                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16960                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16961                                 &attr, true, item,
16962                                 &color_rule->matcher, error)) {
16963                         rte_flow_error_set(error, errno,
16964                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16965                                 "Failed to create hierarchy meter matcher.");
16966                         goto err_exit;
16967                 }
16968                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
16969                                         (enum rte_color)i,
16970                                         color_rule->matcher->matcher_object,
16971                                         acts.actions_n, acts.dv_actions,
16972                                         true, item,
16973                                         &color_rule->rule, &attr)) {
16974                         rte_flow_error_set(error, errno,
16975                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16976                                 "Failed to create hierarchy meter rule.");
16977                         goto err_exit;
16978                 }
16979                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16980                                   color_rule, next_port);
16981         }
16982 exit:
16983         /*
16984          * Recurse to iterate over all meters in the hierarchy and
16985          * create the rules they need.
16986          */
16987         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
16988                                                 src_port, item, error);
16989 err_exit:
16990         if (color_rule) {
16991                 if (color_rule->rule)
16992                         mlx5_flow_os_destroy_flow(color_rule->rule);
16993                 if (color_rule->matcher) {
16994                         struct mlx5_flow_tbl_data_entry *tbl =
16995                                 container_of(color_rule->matcher->tbl,
16996                                                 typeof(*tbl), tbl);
16997                         mlx5_list_unregister(tbl->matchers,
16998                                                 &color_rule->matcher->entry);
16999                 }
17000                 mlx5_free(color_rule);
17001         }
17002         if (next_fm)
17003                 mlx5_flow_meter_detach(priv, next_fm);
17004         return -rte_errno;
17005 }
17006
17007 /**
17008  * Destroy the sub-policy tables holding Rx queue actions.
17009  *
17010  * @param[in] dev
17011  *   Pointer to Ethernet device.
17012  * @param[in] mtr_policy
17013  *   Pointer to meter policy table.
17014  */
17015 static void
17016 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17017                 struct mlx5_flow_meter_policy *mtr_policy)
17018 {
17019         struct mlx5_priv *priv = dev->data->dev_private;
17020         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17021         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17022         uint32_t i, j;
17023         uint16_t sub_policy_num, new_policy_num;
17024
17025         rte_spinlock_lock(&mtr_policy->sl);
17026         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17027                 switch (mtr_policy->act_cnt[i].fate_action) {
17028                 case MLX5_FLOW_FATE_SHARED_RSS:
17029                         sub_policy_num = (mtr_policy->sub_policy_num >>
17030                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17031                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17032                         new_policy_num = sub_policy_num;
17033                         for (j = 0; j < sub_policy_num; j++) {
17034                                 sub_policy =
17035                                         mtr_policy->sub_policys[domain][j];
17036                                 if (sub_policy) {
17037                                         __flow_dv_destroy_sub_policy_rules(dev,
17038                                                 sub_policy);
17039                                         if (sub_policy !=
17040                                             mtr_policy->sub_policys[domain][0]) {
17041                                                 mtr_policy->sub_policys[domain][j] =
17042                                                                 NULL;
17043                                                 mlx5_ipool_free(priv->sh->ipool
17044                                                         [MLX5_IPOOL_MTR_POLICY],
17045                                                         sub_policy->idx);
17046                                                 new_policy_num--;
17047                                         }
17048                                 }
17049                         }
17050                         if (new_policy_num != sub_policy_num) {
17051                                 mtr_policy->sub_policy_num &=
17052                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17053                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17054                                 mtr_policy->sub_policy_num |=
17055                                 (new_policy_num &
17056                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17057                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17058                         }
17059                         break;
17060                 case MLX5_FLOW_FATE_QUEUE:
17061                         sub_policy = mtr_policy->sub_policys[domain][0];
17062                         __flow_dv_destroy_sub_policy_rules(dev,
17063                                                 sub_policy);
17064                         break;
17065                 default:
17066                         /* Other actions have no Rx queue; nothing to do. */
17067                         break;
17068                 }
17069         }
17070         rte_spinlock_unlock(&mtr_policy->sl);
17071 }
17072
17073 /**
17074  * Validate the batch counter support in root table.
17075  *
17076  * Create a simple flow with invalid counter and drop action on root table to
17077  * validate if batch counter with offset on root table is supported or not.
17078  *
17079  * @param[in] dev
17080  *   Pointer to rte_eth_dev structure.
17081  *
17082  * @return
17083  *   0 on success, a negative errno value otherwise and rte_errno is set.
17084  */
17085 int
17086 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17087 {
17088         struct mlx5_priv *priv = dev->data->dev_private;
17089         struct mlx5_dev_ctx_shared *sh = priv->sh;
17090         struct mlx5_flow_dv_match_params mask = {
17091                 .size = sizeof(mask.buf),
17092         };
17093         struct mlx5_flow_dv_match_params value = {
17094                 .size = sizeof(value.buf),
17095         };
17096         struct mlx5dv_flow_matcher_attr dv_attr = {
17097                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17098                 .priority = 0,
17099                 .match_criteria_enable = 0,
17100                 .match_mask = (void *)&mask,
17101         };
17102         void *actions[2] = { 0 };
17103         struct mlx5_flow_tbl_resource *tbl = NULL;
17104         struct mlx5_devx_obj *dcs = NULL;
17105         void *matcher = NULL;
17106         void *flow = NULL;
17107         int ret = -1;
17108
17109         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17110                                         0, 0, 0, NULL);
17111         if (!tbl)
17112                 goto err;
17113         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
17114         if (!dcs)
17115                 goto err;
17116         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17117                                                     &actions[0]);
17118         if (ret)
17119                 goto err;
17120         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17121         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17122         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
17123                                                &matcher);
17124         if (ret)
17125                 goto err;
17126         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17127         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17128                                        actions, &flow);
17129 err:
17130         /*
17131          * If the batch counter with offset is not supported, the driver
17132          * does not validate the invalid offset value and flow creation
17133          * succeeds: batch counters are then unusable in the root table.
17134          * Otherwise, flow creation fails on the invalid offset, which
17135          * means the counter offset is supported.
17136          */
17137         if (flow) {
17138                 DRV_LOG(INFO, "Batch counter is not supported in root "
17139                               "table. Switch to fallback mode.");
17140                 rte_errno = ENOTSUP;
17141                 ret = -rte_errno;
17142                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17143         } else {
17144                 /* Make sure the failure came from flow-create validation. */
17145                 if (!matcher || errno != EINVAL)
17146                         DRV_LOG(ERR, "Unexpected error in counter offset "
17147                                      "support detection");
17148                 ret = 0;
17149         }
17150         if (actions[0])
17151                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17152         if (matcher)
17153                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17154         if (tbl)
17155                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17156         if (dcs)
17157                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17158         return ret;
17159 }
17160
17161 /**
17162  * Query a devx counter.
17163  *
17164  * @param[in] dev
17165  *   Pointer to the Ethernet device structure.
17166  * @param[in] counter
17167  *   Index of the flow counter.
17168  * @param[in] clear
17169  *   Set to clear the counter statistics.
17170  * @param[out] pkts
17171  *   The statistics value of packets.
17172  * @param[out] bytes
17173  *   The statistics value of bytes.
17174  *
17175  * @return
17176  *   0 on success, otherwise return -1.
17177  */
17178 static int
17179 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17180                       uint64_t *pkts, uint64_t *bytes)
17181 {
17182         struct mlx5_priv *priv = dev->data->dev_private;
17183         struct mlx5_flow_counter *cnt;
17184         uint64_t inn_pkts, inn_bytes;
17185         int ret;
17186
17187         if (!priv->config.devx)
17188                 return -1;
17189
17190         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17191         if (ret)
17192                 return -1;
17193         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
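              /*
               * cnt->hits/cnt->bytes hold the raw values read at the last
               * clear, so report the delta since then; on clear, move the
               * baseline forward.
               */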
17194         *pkts = inn_pkts - cnt->hits;
17195         *bytes = inn_bytes - cnt->bytes;
17196         if (clear) {
17197                 cnt->hits = inn_pkts;
17198                 cnt->bytes = inn_bytes;
17199         }
17200         return 0;
17201 }
17202
17203 /**
17204  * Get aged-out flows.
17205  *
17206  * @param[in] dev
17207  *   Pointer to the Ethernet device structure.
17208  * @param[in] context
17209  *   The address of an array of pointers to the aged-out flows contexts.
17210  * @param[in] nb_contexts
17211  *   The length of context array pointers.
17212  * @param[out] error
17213  *   Perform verbose error reporting if not NULL. Initialized in case of
17214  *   error only.
17215  *
17216  * @return
17217  *   the number of aged contexts reported on success, otherwise a
17218  *   negative errno value. If nb_contexts is 0, return the total number
17219  *   of aged contexts. Otherwise, return the number of aged flows
17220  *   reported in the context array.
17222  */
17223 static int
17224 flow_get_aged_flows(struct rte_eth_dev *dev,
17225                     void **context,
17226                     uint32_t nb_contexts,
17227                     struct rte_flow_error *error)
17228 {
17229         struct mlx5_priv *priv = dev->data->dev_private;
17230         struct mlx5_age_info *age_info;
17231         struct mlx5_age_param *age_param;
17232         struct mlx5_flow_counter *counter;
17233         struct mlx5_aso_age_action *act;
17234         int nb_flows = 0;
17235
17236         if (nb_contexts && !context)
17237                 return rte_flow_error_set(error, EINVAL,
17238                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17239                                           NULL, "empty context");
17240         age_info = GET_PORT_AGE_INFO(priv);
17241         rte_spinlock_lock(&age_info->aged_sl);
17242         LIST_FOREACH(act, &age_info->aged_aso, next) {
17243                 nb_flows++;
17244                 if (nb_contexts) {
17245                         context[nb_flows - 1] =
17246                                                 act->age_params.context;
17247                         if (!(--nb_contexts))
17248                                 break;
17249                 }
17250         }
17251         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17252                 nb_flows++;
17253                 if (nb_contexts) {
17254                         age_param = MLX5_CNT_TO_AGE(counter);
17255                         context[nb_flows - 1] = age_param->context;
17256                         if (!(--nb_contexts))
17257                                 break;
17258                 }
17259         }
17260         rte_spinlock_unlock(&age_info->aged_sl);
17261         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17262         return nb_flows;
17263 }
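      /*
       * Application-side usage sketch for the callback above, through the
       * generic rte_flow API (MAX_AGED and handle_aged_context() are
       * hypothetical names):
       *
       *   void *contexts[MAX_AGED];
       *   struct rte_flow_error error;
       *   int n = rte_flow_get_aged_flows(port_id, contexts, MAX_AGED,
       *                                   &error);
       *   for (int k = 0; k < n; k++)
       *           handle_aged_context(contexts[k]);
       *
       * Passing nb_contexts == 0 only queries how many aged flows are
       * pending.
       */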
17264
17265 /*
17266  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17267  */
17268 static uint32_t
17269 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17270 {
17271         return flow_dv_counter_alloc(dev, 0);
17272 }
17273
17274 /**
17275  * Validate indirect action.
17276  * Dispatcher for action type specific validation.
17277  *
17278  * @param[in] dev
17279  *   Pointer to the Ethernet device structure.
17280  * @param[in] conf
17281  *   Indirect action configuration.
17282  * @param[in] action
17283  *   The indirect action object to validate.
17284  * @param[out] error
17285  *   Perform verbose error reporting if not NULL. Initialized in case of
17286  *   error only.
17287  *
17288  * @return
17289  *   0 on success, otherwise negative errno value.
17290  */
17291 static int
17292 flow_dv_action_validate(struct rte_eth_dev *dev,
17293                         const struct rte_flow_indir_action_conf *conf,
17294                         const struct rte_flow_action *action,
17295                         struct rte_flow_error *err)
17296 {
17297         struct mlx5_priv *priv = dev->data->dev_private;
17298
17299         RTE_SET_USED(conf);
17300         switch (action->type) {
17301         case RTE_FLOW_ACTION_TYPE_RSS:
17302                 /*
17303                  * priv->obj_ops is set according to driver capabilities.
17304                  * When DevX capabilities are
17305                  * sufficient, it is set to devx_obj_ops.
17306                  * Otherwise, it is set to ibv_obj_ops.
17307                  * ibv_obj_ops doesn't support ind_table_modify operation.
17308                  * In this case the indirect RSS action can't be used.
17309                  */
17310                 if (priv->obj_ops.ind_table_modify == NULL)
17311                         return rte_flow_error_set
17312                                         (err, ENOTSUP,
17313                                          RTE_FLOW_ERROR_TYPE_ACTION,
17314                                          NULL,
17315                                          "Indirect RSS action not supported");
17316                 return mlx5_validate_action_rss(dev, action, err);
17317         case RTE_FLOW_ACTION_TYPE_AGE:
17318                 if (!priv->sh->aso_age_mng)
17319                         return rte_flow_error_set(err, ENOTSUP,
17320                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17321                                                 NULL,
17322                                                 "Indirect age action not supported");
17323                 return flow_dv_validate_action_age(0, action, dev, err);
17324         case RTE_FLOW_ACTION_TYPE_COUNT:
17325                 /*
17326                  * There are two mechanisms to share the action count.
17327                  * The old mechanism uses the shared field to share, while the
17328                  * new mechanism uses the indirect action API.
17329                  * This validation makes sure the two mechanisms are not
17330                  * combined.
17331                  */
17332                 if (is_shared_action_count(action))
17333                         return rte_flow_error_set(err, ENOTSUP,
17334                                                   RTE_FLOW_ERROR_TYPE_ACTION,
17335                                                   NULL,
17336                                                   "Mix shared and indirect counter is not supported");
17337                 return flow_dv_validate_action_count(dev, true, 0, err);
17338         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17339                 if (!priv->sh->ct_aso_en)
17340                         return rte_flow_error_set(err, ENOTSUP,
17341                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17342                                         "ASO CT is not supported");
17343                 return mlx5_validate_action_ct(dev, action->conf, err);
17344         default:
17345                 return rte_flow_error_set(err, ENOTSUP,
17346                                           RTE_FLOW_ERROR_TYPE_ACTION,
17347                                           NULL,
17348                                           "action type not supported");
17349         }
17350 }
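      /*
       * The dispatcher above runs when an application creates an indirect
       * action, e.g. (sketch; rss_conf contents are application-defined):
       *
       *   struct rte_flow_indir_action_conf conf = { .ingress = 1 };
       *   struct rte_flow_action action = {
       *           .type = RTE_FLOW_ACTION_TYPE_RSS,
       *           .conf = &rss_conf,
       *   };
       *   struct rte_flow_action_handle *handle =
       *           rte_flow_action_handle_create(port_id, &conf, &action,
       *                                         &error);
       *
       * The returned handle is then referenced from flow rules with
       * RTE_FLOW_ACTION_TYPE_INDIRECT.
       */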
17351
17352 /**
17353  * Validate the meter hierarchy chain for meter policy.
17354  *
17355  * @param[in] dev
17356  *   Pointer to the Ethernet device structure.
17357  * @param[in] meter_id
17358  *   Meter id.
17359  * @param[in] action_flags
17360  *   Holds the actions detected until now.
17361  * @param[out] is_rss
17362  *   Is RSS or not.
17363  * @param[out] hierarchy_domain
17364  *   The domain bitmap for hierarchy policy.
17365  * @param[out] error
17366  *   Perform verbose error reporting if not NULL. Initialized in case of
17367  *   error only.
17368  *
17369  * @return
17370  *   0 on success, otherwise negative errno value with error set.
17371  */
17372 static int
17373 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17374                                   uint32_t meter_id,
17375                                   uint64_t action_flags,
17376                                   bool *is_rss,
17377                                   uint8_t *hierarchy_domain,
17378                                   struct rte_mtr_error *error)
17379 {
17380         struct mlx5_priv *priv = dev->data->dev_private;
17381         struct mlx5_flow_meter_info *fm;
17382         struct mlx5_flow_meter_policy *policy;
17383         uint8_t cnt = 1;
17384
17385         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17386                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17387                 return -rte_mtr_error_set(error, EINVAL,
17388                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17389                                         NULL,
17390                                         "Multiple fate actions not supported.");
17391         while (true) {
17392                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17393                 if (!fm)
17394                         return -rte_mtr_error_set(error, EINVAL,
17395                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17396                                         "Meter not found in meter hierarchy.");
17397                 if (fm->def_policy)
17398                         return -rte_mtr_error_set(error, EINVAL,
17399                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
                        "Non-termination meter not supported in hierarchy.");
17401                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17402                 MLX5_ASSERT(policy);
17403                 if (!policy->is_hierarchy) {
17404                         if (policy->transfer)
17405                                 *hierarchy_domain |=
17406                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17407                         if (policy->ingress)
17408                                 *hierarchy_domain |=
17409                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17410                         if (policy->egress)
17411                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17412                         *is_rss = policy->is_rss;
17413                         break;
17414                 }
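                /* Follow the chain through the green action's next meter. */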
17415                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17416                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17417                         return -rte_mtr_error_set(error, EINVAL,
17418                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
                                        "Exceeded the max hierarchy meter number.");
17420         }
17421         return 0;
17422 }
17423
17424 /**
17425  * Validate meter policy actions.
17426  * Dispatcher for action type specific validation.
17427  *
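 * As an illustration (not an exhaustive list), a termination policy that
 * queues green packets and drops red ones would be passed as:
 *     actions[RTE_COLOR_GREEN]: QUEUE -> END
 *     actions[RTE_COLOR_RED]:   DROP  -> END
 * while a default policy provides no green action (or an immediate END).
 *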
17428  * @param[in] dev
17429  *   Pointer to the Ethernet device structure.
 * @param[in] actions
 *   Array of meter policy action lists to validate, indexed by color.
 * @param[in] attr
 *   Attributes of the flow to determine the steering domain.
 * @param[out] is_rss
 *   Set to true when an RSS action is used.
 * @param[out] domain_bitmap
 *   The domain bitmap of the validated policy.
 * @param[out] is_def_policy
 *   Set to true when the actions match the default policy
 *   (green/yellow: no action, red: drop).
17434  * @param[out] error
17435  *   Perform verbose error reporting if not NULL. Initialized in case of
17436  *   error only.
17437  *
17438  * @return
17439  *   0 on success, otherwise negative errno value.
17440  */
17441 static int
17442 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17443                         const struct rte_flow_action *actions[RTE_COLORS],
17444                         struct rte_flow_attr *attr,
17445                         bool *is_rss,
17446                         uint8_t *domain_bitmap,
17447                         bool *is_def_policy,
17448                         struct rte_mtr_error *error)
17449 {
17450         struct mlx5_priv *priv = dev->data->dev_private;
17451         struct mlx5_dev_config *dev_conf = &priv->config;
17452         const struct rte_flow_action *act;
17453         uint64_t action_flags = 0;
17454         int actions_n;
17455         int i, ret;
17456         struct rte_flow_error flow_err;
17457         uint8_t domain_color[RTE_COLORS] = {0};
17458         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17459         uint8_t hierarchy_domain = 0;
17460         const struct rte_flow_action_meter *mtr;
17461
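        /* The transfer domain is only available when E-Switch mode is enabled. */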
17462         if (!priv->config.dv_esw_en)
17463                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17464         *domain_bitmap = def_domain;
17465         if (actions[RTE_COLOR_YELLOW] &&
17466                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
17467                 return -rte_mtr_error_set(error, ENOTSUP,
17468                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17469                                 NULL,
17470                                 "Yellow color does not support any action.");
        if (!actions[RTE_COLOR_RED] ||
                actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17473                 return -rte_mtr_error_set(error, ENOTSUP,
17474                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17475                                 NULL, "Red color only supports drop action.");
17476         /*
17477          * Check default policy actions:
17478          * Green/Yellow: no action, Red: drop action
17479          */
17480         if ((!actions[RTE_COLOR_GREEN] ||
17481                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
17482                 *is_def_policy = true;
17483                 return 0;
17484         }
17485         flow_err.message = NULL;
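        /* Validate each color's action list and derive its steering domain. */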
17486         for (i = 0; i < RTE_COLORS; i++) {
17487                 act = actions[i];
17488                 for (action_flags = 0, actions_n = 0;
17489                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
17490                         act++) {
17491                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17492                                 return -rte_mtr_error_set(error, ENOTSUP,
17493                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17494                                           NULL, "too many actions");
17495                         switch (act->type) {
17496                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17497                                 if (!priv->config.dv_esw_en)
17498                                         return -rte_mtr_error_set(error,
17499                                         ENOTSUP,
17500                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "PORT action is not supported"
                                        " when E-Switch is disabled");
17503                                 ret = flow_dv_validate_action_port_id(dev,
17504                                                 action_flags,
17505                                                 act, attr, &flow_err);
17506                                 if (ret)
17507                                         return -rte_mtr_error_set(error,
17508                                         ENOTSUP,
17509                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17510                                         NULL, flow_err.message ?
17511                                         flow_err.message :
17512                                         "PORT action validate check fail");
17513                                 ++actions_n;
17514                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
17515                                 break;
17516                         case RTE_FLOW_ACTION_TYPE_MARK:
17517                                 ret = flow_dv_validate_action_mark(dev, act,
17518                                                            action_flags,
17519                                                            attr, &flow_err);
17520                                 if (ret < 0)
17521                                         return -rte_mtr_error_set(error,
17522                                         ENOTSUP,
17523                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17524                                         NULL, flow_err.message ?
17525                                         flow_err.message :
17526                                         "Mark action validate check fail");
17527                                 if (dev_conf->dv_xmeta_en !=
17528                                         MLX5_XMETA_MODE_LEGACY)
17529                                         return -rte_mtr_error_set(error,
17530                                         ENOTSUP,
17531                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "Extended MARK action is "
                                        "not supported. Please use the "
                                        "default policy for the meter.");
17535                                 action_flags |= MLX5_FLOW_ACTION_MARK;
17536                                 ++actions_n;
17537                                 break;
17538                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17539                                 ret = flow_dv_validate_action_set_tag(dev,
17540                                                         act, action_flags,
17541                                                         attr, &flow_err);
17542                                 if (ret)
17543                                         return -rte_mtr_error_set(error,
17544                                         ENOTSUP,
17545                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17546                                         NULL, flow_err.message ?
17547                                         flow_err.message :
17548                                         "Set tag action validate check fail");
17549                                 /*
17550                                  * Count all modify-header actions
17551                                  * as one action.
17552                                  */
17553                                 if (!(action_flags &
17554                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
17555                                         ++actions_n;
17556                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
17557                                 break;
17558                         case RTE_FLOW_ACTION_TYPE_DROP:
17559                                 ret = mlx5_flow_validate_action_drop
17560                                         (action_flags,
17561                                         attr, &flow_err);
17562                                 if (ret < 0)
17563                                         return -rte_mtr_error_set(error,
17564                                         ENOTSUP,
17565                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17566                                         NULL, flow_err.message ?
17567                                         flow_err.message :
17568                                         "Drop action validate check fail");
17569                                 action_flags |= MLX5_FLOW_ACTION_DROP;
17570                                 ++actions_n;
17571                                 break;
17572                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17573                                 /*
17574                                  * Check whether extensive
17575                                  * metadata feature is engaged.
17576                                  */
17577                                 if (dev_conf->dv_flow_en &&
17578                                         (dev_conf->dv_xmeta_en !=
17579                                         MLX5_XMETA_MODE_LEGACY) &&
17580                                         mlx5_flow_ext_mreg_supported(dev))
17581                                         return -rte_mtr_error_set(error,
17582                                           ENOTSUP,
17583                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17584                                           NULL, "Queue action with meta "
                                          "is not supported. Please use the "
                                          "default policy for the meter.");
17587                                 ret = mlx5_flow_validate_action_queue(act,
17588                                                         action_flags, dev,
17589                                                         attr, &flow_err);
17590                                 if (ret < 0)
17591                                         return -rte_mtr_error_set(error,
17592                                           ENOTSUP,
17593                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17594                                           NULL, flow_err.message ?
17595                                           flow_err.message :
17596                                           "Queue action validate check fail");
17597                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
17598                                 ++actions_n;
17599                                 break;
17600                         case RTE_FLOW_ACTION_TYPE_RSS:
17601                                 if (dev_conf->dv_flow_en &&
17602                                         (dev_conf->dv_xmeta_en !=
17603                                         MLX5_XMETA_MODE_LEGACY) &&
17604                                         mlx5_flow_ext_mreg_supported(dev))
17605                                         return -rte_mtr_error_set(error,
17606                                           ENOTSUP,
17607                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17608                                           NULL, "RSS action with meta "
                                          "is not supported. Please use the "
                                          "default policy for the meter.");
17611                                 ret = mlx5_validate_action_rss(dev, act,
17612                                                 &flow_err);
17613                                 if (ret < 0)
17614                                         return -rte_mtr_error_set(error,
17615                                           ENOTSUP,
17616                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17617                                           NULL, flow_err.message ?
17618                                           flow_err.message :
17619                                           "RSS action validate check fail");
17620                                 action_flags |= MLX5_FLOW_ACTION_RSS;
17621                                 ++actions_n;
17622                                 *is_rss = true;
17623                                 break;
17624                         case RTE_FLOW_ACTION_TYPE_JUMP:
17625                                 ret = flow_dv_validate_action_jump(dev,
17626                                         NULL, act, action_flags,
17627                                         attr, true, &flow_err);
17628                                 if (ret)
17629                                         return -rte_mtr_error_set(error,
17630                                           ENOTSUP,
17631                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17632                                           NULL, flow_err.message ?
17633                                           flow_err.message :
17634                                           "Jump action validate check fail");
17635                                 ++actions_n;
17636                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
17637                                 break;
17638                         case RTE_FLOW_ACTION_TYPE_METER:
17639                                 if (i != RTE_COLOR_GREEN)
17640                                         return -rte_mtr_error_set(error,
17641                                                 ENOTSUP,
17642                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17643                                                 NULL, flow_err.message ?
17644                                                 flow_err.message :
17645                                   "Meter hierarchy only supports GREEN color.");
17646                                 mtr = act->conf;
17647                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
17648                                                         mtr->mtr_id,
17649                                                         action_flags,
17650                                                         is_rss,
17651                                                         &hierarchy_domain,
17652                                                         error);
17653                                 if (ret)
17654                                         return ret;
17655                                 ++actions_n;
17656                                 action_flags |=
17657                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
17658                                 break;
17659                         default:
17660                                 return -rte_mtr_error_set(error, ENOTSUP,
17661                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17662                                         NULL,
                                        "Action not supported in meter policy");
17664                         }
17665                 }
17666                 /* Yellow is not supported, just skip. */
17667                 if (i == RTE_COLOR_YELLOW)
17668                         continue;
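                /*
                 * Derive the steering domain from the fate action:
                 * PORT_ID implies transfer, QUEUE/RSS (and MARK in legacy
                 * xmeta mode) imply ingress, and a meter hierarchy inherits
                 * the terminal policy's domains.
                 */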
17669                 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
17670                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
17671                 else if ((action_flags &
17672                         (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
17673                         (action_flags & MLX5_FLOW_ACTION_MARK))
17674                         /*
                         * Only MLX5_XMETA_MODE_LEGACY is supported,
                         * so the MARK action is valid only in the
                         * ingress domain.
17677                          */
17678                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
17679                 else if (action_flags &
17680                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
17681                         domain_color[i] = hierarchy_domain;
17682                 else
17683                         domain_color[i] = def_domain;
17684                 /*
17685                  * Validate the drop action mutual exclusion
                 * with other actions: the drop action cannot be combined
                 * with any other action in a meter policy.
17688                  */
17689                 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
17690                         (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
17691                         return -rte_mtr_error_set(error, ENOTSUP,
17692                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17693                                 NULL, "Drop action is mutually-exclusive "
17694                                 "with any other action");
17695                 }
                /* E-Switch has a few restrictions on the items and actions allowed. */
17697                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
17698                         if (!mlx5_flow_ext_mreg_supported(dev) &&
17699                                 action_flags & MLX5_FLOW_ACTION_MARK)
17700                                 return -rte_mtr_error_set(error, ENOTSUP,
17701                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17702                                         NULL, "unsupported action MARK");
17703                         if (action_flags & MLX5_FLOW_ACTION_QUEUE)
17704                                 return -rte_mtr_error_set(error, ENOTSUP,
17705                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17706                                         NULL, "unsupported action QUEUE");
17707                         if (action_flags & MLX5_FLOW_ACTION_RSS)
17708                                 return -rte_mtr_error_set(error, ENOTSUP,
17709                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17710                                         NULL, "unsupported action RSS");
17711                         if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17712                                 return -rte_mtr_error_set(error, ENOTSUP,
17713                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17714                                         NULL, "no fate action is found");
17715                 } else {
17716                         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
17717                                 (domain_color[i] &
17718                                 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
17719                                 if ((domain_color[i] &
17720                                         MLX5_MTR_DOMAIN_EGRESS_BIT))
17721                                         domain_color[i] =
17722                                         MLX5_MTR_DOMAIN_EGRESS_BIT;
17723                                 else
17724                                         return -rte_mtr_error_set(error,
17725                                         ENOTSUP,
17726                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17727                                         NULL, "no fate action is found");
17728                         }
17729                 }
17730                 if (domain_color[i] != def_domain)
17731                         *domain_bitmap = domain_color[i];
17732         }
17733         return 0;
17734 }
17735
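/*
 * Propagate a synchronization request, with the given flags, to each
 * requested and initialized steering domain (NIC Rx, NIC Tx and FDB);
 * uninitialized domains are skipped.
 */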
17736 static int
17737 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
17738 {
17739         struct mlx5_priv *priv = dev->data->dev_private;
17740         int ret = 0;
17741
17742         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
17743                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
17744                                                 flags);
17745                 if (ret != 0)
17746                         return ret;
17747         }
17748         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
17749                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
17750                 if (ret != 0)
17751                         return ret;
17752         }
17753         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
17754                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
17755                 if (ret != 0)
17756                         return ret;
17757         }
17758         return 0;
17759 }
17760
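/* DV driver callbacks registered with the generic mlx5 flow layer. */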
17761 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
17762         .validate = flow_dv_validate,
17763         .prepare = flow_dv_prepare,
17764         .translate = flow_dv_translate,
17765         .apply = flow_dv_apply,
17766         .remove = flow_dv_remove,
17767         .destroy = flow_dv_destroy,
17768         .query = flow_dv_query,
17769         .create_mtr_tbls = flow_dv_create_mtr_tbls,
17770         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
17771         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
17772         .create_meter = flow_dv_mtr_alloc,
17773         .free_meter = flow_dv_aso_mtr_release_to_pool,
17774         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
17775         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
17776         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
17777         .create_policy_rules = flow_dv_create_policy_rules,
17778         .destroy_policy_rules = flow_dv_destroy_policy_rules,
17779         .create_def_policy = flow_dv_create_def_policy,
17780         .destroy_def_policy = flow_dv_destroy_def_policy,
17781         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
17782         .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
17783         .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
17784         .counter_alloc = flow_dv_counter_allocate,
17785         .counter_free = flow_dv_counter_free,
17786         .counter_query = flow_dv_counter_query,
17787         .get_aged_flows = flow_get_aged_flows,
17788         .action_validate = flow_dv_action_validate,
17789         .action_create = flow_dv_action_create,
17790         .action_destroy = flow_dv_action_destroy,
17791         .action_update = flow_dv_action_update,
17792         .action_query = flow_dv_action_query,
17793         .sync_domain = flow_dv_sync_domain,
17794 };
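
/*
 * A minimal sketch of how these callbacks are dispatched by the generic
 * layer (assuming the driver type resolves to MLX5_FLOW_TYPE_DV):
 *
 *     const struct mlx5_flow_driver_ops *fops =
 *             flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
 *     ret = fops->validate(dev, attr, items, actions,
 *                          external, hairpin, error);
 */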
17795
17796 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
17797