drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

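/* Flow attributes deduced from the flow items, used by modify-header actions. */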
union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

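/**
 * Get the E-Switch manager vport id.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   The vport id from the HCA attributes when reported as valid,
 *   0xfffe for BlueField devices, 0 otherwise.
 */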
static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_common_device *cdev = priv->sh->cdev;

        if (cdev->config.hca_attr.esw_mgr_vport_id_valid)
                return (int16_t)cdev->config.hca_attr.esw_mgr_vport_id;

        if (priv->pci_dev == NULL)
                return 0;
        switch (priv->pci_dev->id.device_id) {
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
                return (int16_t)0xfffe;
        default:
                return 0;
        }
}

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If the layer flags are already initialized, this dev_flow is a
         * suffix flow and the flags were set by the prefix flow. Use the
         * prefix flow's layer flags, because the suffix flow may not carry
         * the user-defined items after the flow has been split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/*
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

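/* Field descriptions for the Ethernet header, sizes and offsets in bytes. */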
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

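/**
 * Detect an IP-in-IP tunnel from the inner protocol value and update
 * the layer flags and tunnel indicator accordingly.
 *
 * @param[in] item
 *   Pointer to the IPv4/IPv6 item under check.
 * @param[in] next_protocol
 *   Effective next protocol value of the item.
 * @param[in,out] item_flags
 *   Layer flags to update.
 * @param[out] tunnel
 *   Set to 1 when a tunnel layer is detected.
 */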
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

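/*
 * Return the hash list pointed to by *phl, creating it on first use.
 * Concurrent creators race through an atomic compare-exchange; the
 * loser destroys its own list and returns the winner's one.
 */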
static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
                     const char *name, uint32_t size, bool direct_key,
                     bool lcores_share, void *ctx,
                     mlx5_list_create_cb cb_create,
                     mlx5_list_match_cb cb_match,
                     mlx5_list_remove_cb cb_remove,
                     mlx5_list_clone_cb cb_clone,
                     mlx5_list_clone_free_cb cb_clone_free,
                     struct rte_flow_error *error)
{
        struct mlx5_hlist *hl;
        struct mlx5_hlist *expected = NULL;
        char s[MLX5_NAME_SIZE];

        hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        if (likely(hl))
                return hl;
        snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
        hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
                        ctx, cb_create, cb_match, cb_remove, cb_clone,
                        cb_clone_free);
        if (!hl) {
                DRV_LOG(ERR, "%s hash creation failed", name);
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate resource memory");
                return NULL;
        }
        if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST)) {
                mlx5_hlist_destroy(hl);
                hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        }
        return hl;
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t carry_b = 0;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                uint32_t size_b;
                uint32_t off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        carry_b = 0;
                        if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
                            dcopy->size != 0) {
                                actions[i].length =
                                        dcopy->size * CHAR_BIT - dcopy->offset;
                                carry_b = actions[i].length;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in a source field to fill a
                         * destination field. Switch to the next source.
                         */
                        if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
                            (size_b == field->size * CHAR_BIT - off_b)) {
                                actions[i].length =
                                        field->size * CHAR_BIT - off_b;
                                dcopy->offset += actions[i].length;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

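/* Mapping of metadata registers to modification field identifiers. */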
static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->sh->config.dv_xmeta_en !=
                            MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
                } else {
                        reg_dst.offset = 0;
                        mask = rte_cpu_to_be_32(reg_c0);
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata mode only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1379         /*
1380          * Even though the DSCP bits offset in IPv6 is not byte aligned,
1381          * rdma-core accepts only a byte-aligned DSCP value in bits 0 to 5,
1382          * for compatibility with IPv4. Hence there is no need to shift
1383          * the bits in the IPv6 case.
1384          */
1385         ipv6.hdr.vtc_flow = conf->dscp;
1386         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1387         item.spec = &ipv6;
1388         item.mask = &ipv6_mask;
1389         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1390                                              MLX5_MODIFICATION_TYPE_SET, error);
1391 }
1392
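/**
 * Get the width, in bits, of a field usable in a modify_field action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] field
 *   Field ID to query.
 * @param[in] inherit
 *   Width returned for RTE_FLOW_FIELD_POINTER and RTE_FLOW_FIELD_VALUE
 *   (usually the width of the opposite field of the action), negative
 *   when not applicable.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Field width in bits.
 */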
1393 static int
1394 mlx5_flow_item_field_width(struct rte_eth_dev *dev,
1395                            enum rte_flow_field_id field, int inherit,
1396                            const struct rte_flow_attr *attr,
1397                            struct rte_flow_error *error)
1398 {
1399         struct mlx5_priv *priv = dev->data->dev_private;
1400
1401         switch (field) {
1402         case RTE_FLOW_FIELD_START:
1403                 return 32;
1404         case RTE_FLOW_FIELD_MAC_DST:
1405         case RTE_FLOW_FIELD_MAC_SRC:
1406                 return 48;
1407         case RTE_FLOW_FIELD_VLAN_TYPE:
1408                 return 16;
1409         case RTE_FLOW_FIELD_VLAN_ID:
1410                 return 12;
1411         case RTE_FLOW_FIELD_MAC_TYPE:
1412                 return 16;
1413         case RTE_FLOW_FIELD_IPV4_DSCP:
1414                 return 6;
1415         case RTE_FLOW_FIELD_IPV4_TTL:
1416                 return 8;
1417         case RTE_FLOW_FIELD_IPV4_SRC:
1418         case RTE_FLOW_FIELD_IPV4_DST:
1419                 return 32;
1420         case RTE_FLOW_FIELD_IPV6_DSCP:
1421                 return 6;
1422         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1423                 return 8;
1424         case RTE_FLOW_FIELD_IPV6_SRC:
1425         case RTE_FLOW_FIELD_IPV6_DST:
1426                 return 128;
1427         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1428         case RTE_FLOW_FIELD_TCP_PORT_DST:
1429                 return 16;
1430         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1431         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1432                 return 32;
1433         case RTE_FLOW_FIELD_TCP_FLAGS:
1434                 return 9;
1435         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1436         case RTE_FLOW_FIELD_UDP_PORT_DST:
1437                 return 16;
1438         case RTE_FLOW_FIELD_VXLAN_VNI:
1439         case RTE_FLOW_FIELD_GENEVE_VNI:
1440                 return 24;
1441         case RTE_FLOW_FIELD_GTP_TEID:
1442         case RTE_FLOW_FIELD_TAG:
1443                 return 32;
1444         case RTE_FLOW_FIELD_MARK:
1445                 return __builtin_popcount(priv->sh->dv_mark_mask);
1446         case RTE_FLOW_FIELD_META:
1447                 return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
1448                         __builtin_popcount(priv->sh->dv_meta_mask) : 32;
1449         case RTE_FLOW_FIELD_POINTER:
1450         case RTE_FLOW_FIELD_VALUE:
1451                 return inherit < 0 ? 0 : inherit;
1452         default:
1453                 MLX5_ASSERT(false);
1454         }
1455         return 0;
1456 }
1457
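/**
 * Translate a modify_field field description into hardware modification
 * entries, one entry per modified register chunk.
 *
 * @param[in] data
 *   Field description: field ID, level and bit offset.
 * @param[out] info
 *   Array of modification entries to fill.
 * @param[out] mask
 *   Array of big-endian masks to fill, or NULL when the described field
 *   is the destination of a copy, which takes no mask.
 * @param[in] width
 *   Number of bits to modify.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to the error structure.
 */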
1458 static void
1459 mlx5_flow_field_id_to_modify_info
1460                 (const struct rte_flow_action_modify_data *data,
1461                  struct field_modify_info *info, uint32_t *mask,
1462                  uint32_t width, struct rte_eth_dev *dev,
1463                  const struct rte_flow_attr *attr, struct rte_flow_error *error)
1464 {
1465         struct mlx5_priv *priv = dev->data->dev_private;
1466         uint32_t idx = 0;
1467         uint32_t off = 0;
1468
1469         switch (data->field) {
1470         case RTE_FLOW_FIELD_START:
1471                 /* not supported yet */
1472                 MLX5_ASSERT(false);
1473                 break;
1474         case RTE_FLOW_FIELD_MAC_DST:
1475                 off = data->offset > 16 ? data->offset - 16 : 0;
1476                 if (mask) {
1477                         if (data->offset < 16) {
1478                                 info[idx] = (struct field_modify_info){2, 4,
1479                                                 MLX5_MODI_OUT_DMAC_15_0};
1480                                 if (width < 16) {
1481                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1482                                                                  (16 - width));
1483                                         width = 0;
1484                                 } else {
1485                                         mask[1] = RTE_BE16(0xffff);
1486                                         width -= 16;
1487                                 }
1488                                 if (!width)
1489                                         break;
1490                                 ++idx;
1491                         }
1492                         info[idx] = (struct field_modify_info){4, 0,
1493                                                 MLX5_MODI_OUT_DMAC_47_16};
1494                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1495                                                     (32 - width)) << off);
1496                 } else {
1497                         if (data->offset < 16)
1498                                 info[idx++] = (struct field_modify_info){2, 0,
1499                                                 MLX5_MODI_OUT_DMAC_15_0};
1500                         info[idx] = (struct field_modify_info){4, off,
1501                                                 MLX5_MODI_OUT_DMAC_47_16};
1502                 }
1503                 break;
1504         case RTE_FLOW_FIELD_MAC_SRC:
1505                 off = data->offset > 16 ? data->offset - 16 : 0;
1506                 if (mask) {
1507                         if (data->offset < 16) {
1508                                 info[idx] = (struct field_modify_info){2, 4,
1509                                                 MLX5_MODI_OUT_SMAC_15_0};
1510                                 if (width < 16) {
1511                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1512                                                                  (16 - width));
1513                                         width = 0;
1514                                 } else {
1515                                         mask[1] = RTE_BE16(0xffff);
1516                                         width -= 16;
1517                                 }
1518                                 if (!width)
1519                                         break;
1520                                 ++idx;
1521                         }
1522                         info[idx] = (struct field_modify_info){4, 0,
1523                                                 MLX5_MODI_OUT_SMAC_47_16};
1524                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1525                                                     (32 - width)) << off);
1526                 } else {
1527                         if (data->offset < 16)
1528                                 info[idx++] = (struct field_modify_info){2, 0,
1529                                                 MLX5_MODI_OUT_SMAC_15_0};
1530                         info[idx] = (struct field_modify_info){4, off,
1531                                                 MLX5_MODI_OUT_SMAC_47_16};
1532                 }
1533                 break;
1534         case RTE_FLOW_FIELD_VLAN_TYPE:
1535                 /* not supported yet */
1536                 break;
1537         case RTE_FLOW_FIELD_VLAN_ID:
1538                 info[idx] = (struct field_modify_info){2, 0,
1539                                         MLX5_MODI_OUT_FIRST_VID};
1540                 if (mask)
1541                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1542                 break;
1543         case RTE_FLOW_FIELD_MAC_TYPE:
1544                 info[idx] = (struct field_modify_info){2, 0,
1545                                         MLX5_MODI_OUT_ETHERTYPE};
1546                 if (mask)
1547                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1548                 break;
1549         case RTE_FLOW_FIELD_IPV4_DSCP:
1550                 info[idx] = (struct field_modify_info){1, 0,
1551                                         MLX5_MODI_OUT_IP_DSCP};
1552                 if (mask)
1553                         mask[idx] = 0x3f >> (6 - width);
1554                 break;
1555         case RTE_FLOW_FIELD_IPV4_TTL:
1556                 info[idx] = (struct field_modify_info){1, 0,
1557                                         MLX5_MODI_OUT_IPV4_TTL};
1558                 if (mask)
1559                         mask[idx] = 0xff >> (8 - width);
1560                 break;
1561         case RTE_FLOW_FIELD_IPV4_SRC:
1562                 info[idx] = (struct field_modify_info){4, 0,
1563                                         MLX5_MODI_OUT_SIPV4};
1564                 if (mask)
1565                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1566                                                      (32 - width));
1567                 break;
1568         case RTE_FLOW_FIELD_IPV4_DST:
1569                 info[idx] = (struct field_modify_info){4, 0,
1570                                         MLX5_MODI_OUT_DIPV4};
1571                 if (mask)
1572                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1573                                                      (32 - width));
1574                 break;
1575         case RTE_FLOW_FIELD_IPV6_DSCP:
1576                 info[idx] = (struct field_modify_info){1, 0,
1577                                         MLX5_MODI_OUT_IP_DSCP};
1578                 if (mask)
1579                         mask[idx] = 0x3f >> (6 - width);
1580                 break;
1581         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1582                 info[idx] = (struct field_modify_info){1, 0,
1583                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1584                 if (mask)
1585                         mask[idx] = 0xff >> (8 - width);
1586                 break;
1587         case RTE_FLOW_FIELD_IPV6_SRC:
1588                 if (mask) {
1589                         if (data->offset < 32) {
1590                                 info[idx] = (struct field_modify_info){4, 12,
1591                                                 MLX5_MODI_OUT_SIPV6_31_0};
1592                                 if (width < 32) {
1593                                         mask[3] =
1594                                                 rte_cpu_to_be_32(0xffffffff >>
1595                                                                  (32 - width));
1596                                         width = 0;
1597                                 } else {
1598                                         mask[3] = RTE_BE32(0xffffffff);
1599                                         width -= 32;
1600                                 }
1601                                 if (!width)
1602                                         break;
1603                                 ++idx;
1604                         }
1605                         if (data->offset < 64) {
1606                                 info[idx] = (struct field_modify_info){4, 8,
1607                                                 MLX5_MODI_OUT_SIPV6_63_32};
1608                                 if (width < 32) {
1609                                         mask[2] =
1610                                                 rte_cpu_to_be_32(0xffffffff >>
1611                                                                  (32 - width));
1612                                         width = 0;
1613                                 } else {
1614                                         mask[2] = RTE_BE32(0xffffffff);
1615                                         width -= 32;
1616                                 }
1617                                 if (!width)
1618                                         break;
1619                                 ++idx;
1620                         }
1621                         if (data->offset < 96) {
1622                                 info[idx] = (struct field_modify_info){4, 4,
1623                                                 MLX5_MODI_OUT_SIPV6_95_64};
1624                                 if (width < 32) {
1625                                         mask[1] =
1626                                                 rte_cpu_to_be_32(0xffffffff >>
1627                                                                  (32 - width));
1628                                         width = 0;
1629                                 } else {
1630                                         mask[1] = RTE_BE32(0xffffffff);
1631                                         width -= 32;
1632                                 }
1633                                 if (!width)
1634                                         break;
1635                                 ++idx;
1636                         }
1637                         info[idx] = (struct field_modify_info){4, 0,
1638                                                 MLX5_MODI_OUT_SIPV6_127_96};
1639                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1640                 } else {
1641                         if (data->offset < 32)
1642                                 info[idx++] = (struct field_modify_info){4, 0,
1643                                                 MLX5_MODI_OUT_SIPV6_31_0};
1644                         if (data->offset < 64)
1645                                 info[idx++] = (struct field_modify_info){4, 0,
1646                                                 MLX5_MODI_OUT_SIPV6_63_32};
1647                         if (data->offset < 96)
1648                                 info[idx++] = (struct field_modify_info){4, 0,
1649                                                 MLX5_MODI_OUT_SIPV6_95_64};
1650                         if (data->offset < 128)
1651                                 info[idx++] = (struct field_modify_info){4, 0,
1652                                                 MLX5_MODI_OUT_SIPV6_127_96};
1653                 }
1654                 break;
1655         case RTE_FLOW_FIELD_IPV6_DST:
1656                 if (mask) {
1657                         if (data->offset < 32) {
1658                                 info[idx] = (struct field_modify_info){4, 12,
1659                                                 MLX5_MODI_OUT_DIPV6_31_0};
1660                                 if (width < 32) {
1661                                         mask[3] =
1662                                                 rte_cpu_to_be_32(0xffffffff >>
1663                                                                  (32 - width));
1664                                         width = 0;
1665                                 } else {
1666                                         mask[3] = RTE_BE32(0xffffffff);
1667                                         width -= 32;
1668                                 }
1669                                 if (!width)
1670                                         break;
1671                                 ++idx;
1672                         }
1673                         if (data->offset < 64) {
1674                                 info[idx] = (struct field_modify_info){4, 8,
1675                                                 MLX5_MODI_OUT_DIPV6_63_32};
1676                                 if (width < 32) {
1677                                         mask[2] =
1678                                                 rte_cpu_to_be_32(0xffffffff >>
1679                                                                  (32 - width));
1680                                         width = 0;
1681                                 } else {
1682                                         mask[2] = RTE_BE32(0xffffffff);
1683                                         width -= 32;
1684                                 }
1685                                 if (!width)
1686                                         break;
1687                                 ++idx;
1688                         }
1689                         if (data->offset < 96) {
1690                                 info[idx] = (struct field_modify_info){4, 4,
1691                                                 MLX5_MODI_OUT_DIPV6_95_64};
1692                                 if (width < 32) {
1693                                         mask[1] =
1694                                                 rte_cpu_to_be_32(0xffffffff >>
1695                                                                  (32 - width));
1696                                         width = 0;
1697                                 } else {
1698                                         mask[1] = RTE_BE32(0xffffffff);
1699                                         width -= 32;
1700                                 }
1701                                 if (!width)
1702                                         break;
1703                                 ++idx;
1704                         }
1705                         info[idx] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_127_96};
1707                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1708                 } else {
1709                         if (data->offset < 32)
1710                                 info[idx++] = (struct field_modify_info){4, 0,
1711                                                 MLX5_MODI_OUT_DIPV6_31_0};
1712                         if (data->offset < 64)
1713                                 info[idx++] = (struct field_modify_info){4, 0,
1714                                                 MLX5_MODI_OUT_DIPV6_63_32};
1715                         if (data->offset < 96)
1716                                 info[idx++] = (struct field_modify_info){4, 0,
1717                                                 MLX5_MODI_OUT_DIPV6_95_64};
1718                         if (data->offset < 128)
1719                                 info[idx++] = (struct field_modify_info){4, 0,
1720                                                 MLX5_MODI_OUT_DIPV6_127_96};
1721                 }
1722                 break;
1723         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1724                 info[idx] = (struct field_modify_info){2, 0,
1725                                         MLX5_MODI_OUT_TCP_SPORT};
1726                 if (mask)
1727                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1728                 break;
1729         case RTE_FLOW_FIELD_TCP_PORT_DST:
1730                 info[idx] = (struct field_modify_info){2, 0,
1731                                         MLX5_MODI_OUT_TCP_DPORT};
1732                 if (mask)
1733                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1734                 break;
1735         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1736                 info[idx] = (struct field_modify_info){4, 0,
1737                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1738                 if (mask)
1739                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1740                                                      (32 - width));
1741                 break;
1742         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1743                 info[idx] = (struct field_modify_info){4, 0,
1744                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1745                 if (mask)
1746                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1747                                                      (32 - width));
1748                 break;
1749         case RTE_FLOW_FIELD_TCP_FLAGS:
1750                 info[idx] = (struct field_modify_info){2, 0,
1751                                         MLX5_MODI_OUT_TCP_FLAGS};
1752                 if (mask)
1753                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1754                 break;
1755         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1756                 info[idx] = (struct field_modify_info){2, 0,
1757                                         MLX5_MODI_OUT_UDP_SPORT};
1758                 if (mask)
1759                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1760                 break;
1761         case RTE_FLOW_FIELD_UDP_PORT_DST:
1762                 info[idx] = (struct field_modify_info){2, 0,
1763                                         MLX5_MODI_OUT_UDP_DPORT};
1764                 if (mask)
1765                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1766                 break;
1767         case RTE_FLOW_FIELD_VXLAN_VNI:
1768                 /* not supported yet */
1769                 break;
1770         case RTE_FLOW_FIELD_GENEVE_VNI:
1771                 /* not supported yet */
1772                 break;
1773         case RTE_FLOW_FIELD_GTP_TEID:
1774                 info[idx] = (struct field_modify_info){4, 0,
1775                                         MLX5_MODI_GTP_TEID};
1776                 if (mask)
1777                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1778                                                      (32 - width));
1779                 break;
1780         case RTE_FLOW_FIELD_TAG:
1781                 {
1782                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1783                                                    data->level, error);
1784                         if (reg < 0)
1785                                 return;
1786                         MLX5_ASSERT(reg != REG_NON);
1787                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1788                         info[idx] = (struct field_modify_info){4, 0,
1789                                                 reg_to_field[reg]};
1790                         if (mask)
1791                                 mask[idx] =
1792                                         rte_cpu_to_be_32(0xffffffff >>
1793                                                          (32 - width));
1794                 }
1795                 break;
1796         case RTE_FLOW_FIELD_MARK:
1797                 {
1798                         uint32_t mark_mask = priv->sh->dv_mark_mask;
1799                         uint32_t mark_count = __builtin_popcount(mark_mask);
1800                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1801                                                        0, error);
1802                         if (reg < 0)
1803                                 return;
1804                         MLX5_ASSERT(reg != REG_NON);
1805                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1806                         info[idx] = (struct field_modify_info){4, 0,
1807                                                 reg_to_field[reg]};
1808                         if (mask)
1809                                 mask[idx] = rte_cpu_to_be_32((mark_mask >>
1810                                          (mark_count - width)) & mark_mask);
1811                 }
1812                 break;
1813         case RTE_FLOW_FIELD_META:
1814                 {
1815                         uint32_t meta_mask = priv->sh->dv_meta_mask;
1816                         uint32_t meta_count = __builtin_popcount(meta_mask);
1817                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1818                         if (reg < 0)
1819                                 return;
1820                         MLX5_ASSERT(reg != REG_NON);
1821                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1822                         info[idx] = (struct field_modify_info){4, 0,
1823                                                 reg_to_field[reg]};
1824                         if (mask)
1825                                 mask[idx] = rte_cpu_to_be_32((meta_mask >>
1826                                         (meta_count - width)) & meta_mask);
1827                 }
1828                 break;
1829         case RTE_FLOW_FIELD_POINTER:
1830         case RTE_FLOW_FIELD_VALUE:
1831         default:
1832                 MLX5_ASSERT(false);
1833                 break;
1834         }
1835 }
1836
1837 /**
1838  * Convert modify_field action to DV specification.
1839  *
1840  * @param[in] dev
1841  *   Pointer to the rte_eth_dev structure.
1842  * @param[in,out] resource
1843  *   Pointer to the modify-header resource.
1844  * @param[in] action
1845  *   Pointer to action specification.
1846  * @param[in] attr
1847  *   Attributes of flow that includes this item.
1848  * @param[out] error
1849  *   Pointer to the error structure.
1850  *
1851  * @return
1852  *   0 on success, a negative errno value otherwise and rte_errno is set.
1853  */
1854 static int
1855 flow_dv_convert_action_modify_field
1856                         (struct rte_eth_dev *dev,
1857                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1858                          const struct rte_flow_action *action,
1859                          const struct rte_flow_attr *attr,
1860                          struct rte_flow_error *error)
1861 {
1862         const struct rte_flow_action_modify_field *conf =
1863                 (const struct rte_flow_action_modify_field *)(action->conf);
1864         struct rte_flow_item item = {
1865                 .spec = NULL,
1866                 .mask = NULL
1867         };
1868         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1869                                                                 {0, 0, 0} };
1870         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1871                                                                 {0, 0, 0} };
1872         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1873         uint32_t type, meta = 0;
1874
1875         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1876             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1877                 type = MLX5_MODIFICATION_TYPE_SET;
1878                 /* For SET fill the destination field (field) first. */
1879                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1880                                                   conf->width, dev,
1881                                                   attr, error);
1882                 item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1883                                         (void *)(uintptr_t)conf->src.pvalue :
1884                                         (void *)(uintptr_t)&conf->src.value;
1885                 if (conf->dst.field == RTE_FLOW_FIELD_META) {
1886                         meta = *(const unaligned_uint32_t *)item.spec;
1887                         meta = rte_cpu_to_be_32(meta);
1888                         item.spec = &meta;
1889                 }
1890         } else {
1891                 type = MLX5_MODIFICATION_TYPE_COPY;
1892                 /* For COPY fill the destination field (dcopy) without mask. */
1893                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1894                                                   conf->width, dev,
1895                                                   attr, error);
1896                 /* Then construct the source field (field) with mask. */
1897                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1898                                                   conf->width, dev,
1899                                                   attr, error);
1900         }
1901         item.mask = &mask;
1902         return flow_dv_convert_modify_action(&item,
1903                         field, dcopy, resource, type, error);
1904 }
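
/*
 * Illustrative sketch (field choice is arbitrary): a MODIFY_FIELD action as
 * an application would build it. A source that is a packet field rather
 * than VALUE/POINTER selects the COPY conversion path above; here 16 bits
 * of the TCP source port are copied into tag 0 (for TAG, "level" holds the
 * tag array index).
 *
 *	struct rte_flow_action_modify_field mf = {
 *		.operation = RTE_FLOW_MODIFY_SET,
 *		.dst = { .field = RTE_FLOW_FIELD_TAG, .level = 0 },
 *		.src = { .field = RTE_FLOW_FIELD_TCP_PORT_SRC },
 *		.width = 16,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
 *		.conf = &mf,
 *	};
 */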
1905
1906 /**
1907  * Validate MARK item.
1908  *
1909  * @param[in] dev
1910  *   Pointer to the rte_eth_dev structure.
1911  * @param[in] item
1912  *   Item specification.
1913  * @param[in] attr
1914  *   Attributes of flow that includes this item.
1915  * @param[out] error
1916  *   Pointer to error structure.
1917  *
1918  * @return
1919  *   0 on success, a negative errno value otherwise and rte_errno is set.
1920  */
1921 static int
1922 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1923                            const struct rte_flow_item *item,
1924                            const struct rte_flow_attr *attr __rte_unused,
1925                            struct rte_flow_error *error)
1926 {
1927         struct mlx5_priv *priv = dev->data->dev_private;
1928         struct mlx5_sh_config *config = &priv->sh->config;
1929         const struct rte_flow_item_mark *spec = item->spec;
1930         const struct rte_flow_item_mark *mask = item->mask;
1931         const struct rte_flow_item_mark nic_mask = {
1932                 .id = priv->sh->dv_mark_mask,
1933         };
1934         int ret;
1935
1936         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1937                 return rte_flow_error_set(error, ENOTSUP,
1938                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1939                                           "extended metadata feature"
1940                                           " isn't enabled");
1941         if (!mlx5_flow_ext_mreg_supported(dev))
1942                 return rte_flow_error_set(error, ENOTSUP,
1943                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1944                                           "extended metadata register"
1945                                           " isn't supported");
1946         if (!nic_mask.id)
1947                 return rte_flow_error_set(error, ENOTSUP,
1948                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1949                                           "extended metadata register"
1950                                           " isn't available");
1951         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1952         if (ret < 0)
1953                 return ret;
1954         if (!spec)
1955                 return rte_flow_error_set(error, EINVAL,
1956                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1957                                           item->spec,
1958                                           "data cannot be empty");
1959         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1960                 return rte_flow_error_set(error, EINVAL,
1961                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1962                                           &spec->id,
1963                                           "mark id exceeds the limit");
1964         if (!mask)
1965                 mask = &nic_mask;
1966         if (!mask->id)
1967                 return rte_flow_error_set(error, EINVAL,
1968                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1969                                         "mask cannot be zero");
1970
1971         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1972                                         (const uint8_t *)&nic_mask,
1973                                         sizeof(struct rte_flow_item_mark),
1974                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1975         if (ret < 0)
1976                 return ret;
1977         return 0;
1978 }
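
/*
 * Illustrative sketch (the id is arbitrary): a MARK pattern item accepted
 * by the validator above. Matching on MARK requires a non-legacy
 * dv_xmeta_en mode and an available metadata register.
 *
 *	struct rte_flow_item_mark mark_spec = { .id = 42 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_MARK,
 *		.spec = &mark_spec,
 *		.mask = &rte_flow_item_mark_mask,
 *	};
 */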
1979
1980 /**
1981  * Validate META item.
1982  *
1983  * @param[in] dev
1984  *   Pointer to the rte_eth_dev structure.
1985  * @param[in] item
1986  *   Item specification.
1987  * @param[in] attr
1988  *   Attributes of flow that includes this item.
1989  * @param[out] error
1990  *   Pointer to error structure.
1991  *
1992  * @return
1993  *   0 on success, a negative errno value otherwise and rte_errno is set.
1994  */
1995 static int
1996 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1997                            const struct rte_flow_item *item,
1998                            const struct rte_flow_attr *attr,
1999                            struct rte_flow_error *error)
2000 {
2001         struct mlx5_priv *priv = dev->data->dev_private;
2002         struct mlx5_sh_config *config = &priv->sh->config;
2003         const struct rte_flow_item_meta *spec = item->spec;
2004         const struct rte_flow_item_meta *mask = item->mask;
2005         struct rte_flow_item_meta nic_mask = {
2006                 .data = UINT32_MAX
2007         };
2008         int reg;
2009         int ret;
2010
2011         if (!spec)
2012                 return rte_flow_error_set(error, EINVAL,
2013                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2014                                           item->spec,
2015                                           "data cannot be empty");
2016         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2017                 if (!mlx5_flow_ext_mreg_supported(dev))
2018                         return rte_flow_error_set(error, ENOTSUP,
2019                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2020                                           "extended metadata register"
2021                                           " isn't supported");
2022                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2023                 if (reg < 0)
2024                         return reg;
2025                 if (reg == REG_NON)
2026                         return rte_flow_error_set(error, ENOTSUP,
2027                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2028                                         "unavailable extended metadata register");
2029                 if (reg == REG_B)
2030                         return rte_flow_error_set(error, ENOTSUP,
2031                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2032                                           "match on reg_b "
2033                                           "isn't supported");
2034                 if (reg != REG_A)
2035                         nic_mask.data = priv->sh->dv_meta_mask;
2036         } else {
2037                 if (attr->transfer)
2038                         return rte_flow_error_set(error, ENOTSUP,
2039                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2040                                         "extended metadata feature "
2041                                         "should be enabled when "
2042                                         "meta item is requested "
2043                                         "with e-switch mode");
2044                 if (attr->ingress)
2045                         return rte_flow_error_set(error, ENOTSUP,
2046                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2047                                         "match on metadata for ingress "
2048                                         "is not supported in legacy "
2049                                         "metadata mode");
2050         }
2051         if (!mask)
2052                 mask = &rte_flow_item_meta_mask;
2053         if (!mask->data)
2054                 return rte_flow_error_set(error, EINVAL,
2055                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2056                                         "mask cannot be zero");
2057
2058         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2059                                         (const uint8_t *)&nic_mask,
2060                                         sizeof(struct rte_flow_item_meta),
2061                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2062         return ret;
2063 }
2064
2065 /**
2066  * Validate TAG item.
2067  *
2068  * @param[in] dev
2069  *   Pointer to the rte_eth_dev structure.
2070  * @param[in] item
2071  *   Item specification.
2072  * @param[in] attr
2073  *   Attributes of flow that includes this item.
2074  * @param[out] error
2075  *   Pointer to error structure.
2076  *
2077  * @return
2078  *   0 on success, a negative errno value otherwise and rte_errno is set.
2079  */
2080 static int
2081 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2082                           const struct rte_flow_item *item,
2083                           const struct rte_flow_attr *attr __rte_unused,
2084                           struct rte_flow_error *error)
2085 {
2086         const struct rte_flow_item_tag *spec = item->spec;
2087         const struct rte_flow_item_tag *mask = item->mask;
2088         const struct rte_flow_item_tag nic_mask = {
2089                 .data = RTE_BE32(UINT32_MAX),
2090                 .index = 0xff,
2091         };
2092         int ret;
2093
2094         if (!mlx5_flow_ext_mreg_supported(dev))
2095                 return rte_flow_error_set(error, ENOTSUP,
2096                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2097                                           "extended metadata register"
2098                                           " isn't supported");
2099         if (!spec)
2100                 return rte_flow_error_set(error, EINVAL,
2101                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2102                                           item->spec,
2103                                           "data cannot be empty");
2104         if (!mask)
2105                 mask = &rte_flow_item_tag_mask;
2106         if (!mask->data)
2107                 return rte_flow_error_set(error, EINVAL,
2108                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2109                                         "mask cannot be zero");
2110
2111         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2112                                         (const uint8_t *)&nic_mask,
2113                                         sizeof(struct rte_flow_item_tag),
2114                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2115         if (ret < 0)
2116                 return ret;
2117         if (mask->index != 0xff)
2118                 return rte_flow_error_set(error, EINVAL,
2119                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2120                                           "partial mask for tag index"
2121                                           " is not supported");
2122         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2123         if (ret < 0)
2124                 return ret;
2125         MLX5_ASSERT(ret != REG_NON);
2126         return 0;
2127 }
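
/*
 * Illustrative sketch (index and data are arbitrary): a TAG pattern item
 * accepted by the validator above. The index must be matched exactly
 * (mask 0xff); the data mask may be partial.
 *
 *	struct rte_flow_item_tag tag_spec = {
 *		.data = 0x55,
 *		.index = 3,
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_TAG,
 *		.spec = &tag_spec,
 *		.mask = &rte_flow_item_tag_mask,
 *	};
 */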
2128
2129 /**
2130  * Validate PORT_ID item.
2131  *
2132  * @param[in] dev
2133  *   Pointer to the rte_eth_dev structure.
2134  * @param[in] item
2135  *   Item specification.
2136  * @param[in] attr
2137  *   Attributes of flow that includes this item.
2138  * @param[in] item_flags
2139  *   Bit-fields that hold the items detected until now.
2140  * @param[out] error
2141  *   Pointer to error structure.
2142  *
2143  * @return
2144  *   0 on success, a negative errno value otherwise and rte_errno is set.
2145  */
2146 static int
2147 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2148                               const struct rte_flow_item *item,
2149                               const struct rte_flow_attr *attr,
2150                               uint64_t item_flags,
2151                               struct rte_flow_error *error)
2152 {
2153         const struct rte_flow_item_port_id *spec = item->spec;
2154         const struct rte_flow_item_port_id *mask = item->mask;
2155         const struct rte_flow_item_port_id switch_mask = {
2156                         .id = 0xffffffff,
2157         };
2158         struct mlx5_priv *esw_priv;
2159         struct mlx5_priv *dev_priv;
2160         int ret;
2161
2162         if (!attr->transfer)
2163                 return rte_flow_error_set(error, EINVAL,
2164                                           RTE_FLOW_ERROR_TYPE_ITEM,
2165                                           NULL,
2166                                           "match on port id is valid only"
2167                                           " when transfer flag is enabled");
2168         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2169                 return rte_flow_error_set(error, ENOTSUP,
2170                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2171                                           "multiple source ports are not"
2172                                           " supported");
2173         if (!mask)
2174                 mask = &switch_mask;
2175         if (mask->id != 0xffffffff)
2176                 return rte_flow_error_set(error, ENOTSUP,
2177                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2178                                            mask,
2179                                            "no support for partial mask on"
2180                                            " \"id\" field");
2181         ret = mlx5_flow_item_acceptable
2182                                 (item, (const uint8_t *)mask,
2183                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2184                                  sizeof(struct rte_flow_item_port_id),
2185                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2186         if (ret)
2187                 return ret;
2188         if (!spec)
2189                 return 0;
2190         if (spec->id == MLX5_PORT_ESW_MGR)
2191                 return 0;
2192         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2193         if (!esw_priv)
2194                 return rte_flow_error_set(error, rte_errno,
2195                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2196                                           "failed to obtain E-Switch info for"
2197                                           " port");
2198         dev_priv = mlx5_dev_to_eswitch_info(dev);
2199         if (!dev_priv)
2200                 return rte_flow_error_set(error, rte_errno,
2201                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2202                                           NULL,
2203                                           "failed to obtain E-Switch info");
2204         if (esw_priv->domain_id != dev_priv->domain_id)
2205                 return rte_flow_error_set(error, EINVAL,
2206                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2207                                           "cannot match on a port from a"
2208                                           " different E-Switch");
2209         return 0;
2210 }
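
/*
 * Illustrative sketch (the port number is arbitrary): a PORT_ID pattern
 * item accepted by the validator above. It is valid only in transfer
 * flows, the 32-bit "id" must be fully masked, and the port must belong
 * to the same E-Switch as the device creating the flow.
 *
 *	struct rte_flow_item_port_id pid_spec = { .id = 1 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *		.spec = &pid_spec,
 *		.mask = &rte_flow_item_port_id_mask,
 *	};
 */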
2211
2212 /**
2213  * Validate VLAN item.
2214  *
2215  * @param[in] item
2216  *   Item specification.
2217  * @param[in] item_flags
2218  *   Bit-fields that hold the items detected until now.
2219  * @param[in] dev
2220  *   Ethernet device flow is being created on.
2221  * @param[out] error
2222  *   Pointer to error structure.
2223  *
2224  * @return
2225  *   0 on success, a negative errno value otherwise and rte_errno is set.
2226  */
2227 static int
2228 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2229                            uint64_t item_flags,
2230                            struct rte_eth_dev *dev,
2231                            struct rte_flow_error *error)
2232 {
2233         const struct rte_flow_item_vlan *mask = item->mask;
2234         const struct rte_flow_item_vlan nic_mask = {
2235                 .tci = RTE_BE16(UINT16_MAX),
2236                 .inner_type = RTE_BE16(UINT16_MAX),
2237                 .has_more_vlan = 1,
2238         };
2239         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2240         int ret;
2241         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2242                                         MLX5_FLOW_LAYER_INNER_L4) :
2243                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2244                                         MLX5_FLOW_LAYER_OUTER_L4);
2245         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2246                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2247
2248         if (item_flags & vlanm)
2249                 return rte_flow_error_set(error, EINVAL,
2250                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2251                                           "multiple VLAN layers not supported");
2252         else if ((item_flags & l34m) != 0)
2253                 return rte_flow_error_set(error, EINVAL,
2254                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2255                                           "VLAN cannot follow L3/L4 layer");
2256         if (!mask)
2257                 mask = &rte_flow_item_vlan_mask;
2258         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2259                                         (const uint8_t *)&nic_mask,
2260                                         sizeof(struct rte_flow_item_vlan),
2261                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2262         if (ret)
2263                 return ret;
2264         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2265                 struct mlx5_priv *priv = dev->data->dev_private;
2266
2267                 if (priv->vmwa_context) {
2268                         /*
2269                          * A non-NULL context means we have a virtual machine
2270                          * with SR-IOV enabled and must create a VLAN interface
2271                          * so that the hypervisor sets up the E-Switch vport
2272                          * context correctly. We avoid creating multiple VLAN
2273                          * interfaces, so we cannot support a VLAN tag mask.
2274                          */
2275                         return rte_flow_error_set(error, EINVAL,
2276                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2277                                                   item,
2278                                                   "VLAN tag mask is not"
2279                                                   " supported in virtual"
2280                                                   " environment");
2281                 }
2282         }
2283         return 0;
2284 }
2285
2286 /*
2287  * GTP flags are contained in 1 byte of the format:
2288  * -------------------------------------------
2289  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2290  * |-----------------------------------------|
2291  * | value | Version | PT | Res | E | S | PN |
2292  * -------------------------------------------
2293  *
2294  * Matching is supported only for GTP flags E, S, PN.
2295  */
2296 #define MLX5_GTP_FLAGS_MASK     0x07
2297
2298 /**
2299  * Validate GTP item.
2300  *
2301  * @param[in] dev
2302  *   Pointer to the rte_eth_dev structure.
2303  * @param[in] item
2304  *   Item specification.
2305  * @param[in] item_flags
2306  *   Bit-fields that hold the items detected until now.
2307  * @param[out] error
2308  *   Pointer to error structure.
2309  *
2310  * @return
2311  *   0 on success, a negative errno value otherwise and rte_errno is set.
2312  */
2313 static int
2314 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2315                           const struct rte_flow_item *item,
2316                           uint64_t item_flags,
2317                           struct rte_flow_error *error)
2318 {
2319         struct mlx5_priv *priv = dev->data->dev_private;
2320         const struct rte_flow_item_gtp *spec = item->spec;
2321         const struct rte_flow_item_gtp *mask = item->mask;
2322         const struct rte_flow_item_gtp nic_mask = {
2323                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2324                 .msg_type = 0xff,
2325                 .teid = RTE_BE32(0xffffffff),
2326         };
2327
2328         if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_gtp)
2329                 return rte_flow_error_set(error, ENOTSUP,
2330                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2331                                           "GTP support is not enabled");
2332         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2333                 return rte_flow_error_set(error, ENOTSUP,
2334                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2335                                           "multiple tunnel layers not"
2336                                           " supported");
2337         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2338                 return rte_flow_error_set(error, EINVAL,
2339                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2340                                           "no outer UDP layer found");
2341         if (!mask)
2342                 mask = &rte_flow_item_gtp_mask;
2343         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2344                 return rte_flow_error_set(error, ENOTSUP,
2345                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2346                                           "Match is supported for GTP"
2347                                           " flags only");
2348         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2349                                          (const uint8_t *)&nic_mask,
2350                                          sizeof(struct rte_flow_item_gtp),
2351                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2352 }
2353
2354 /**
2355  * Validate GTP PSC item.
2356  *
2357  * @param[in] item
2358  *   Item specification.
2359  * @param[in] last_item
2360  *   Previously validated item in the pattern items.
2361  * @param[in] gtp_item
2362  *   Previous GTP item specification.
2363  * @param[in] attr
2364  *   Pointer to flow attributes.
2365  * @param[out] error
2366  *   Pointer to error structure.
2367  *
2368  * @return
2369  *   0 on success, a negative errno value otherwise and rte_errno is set.
2370  */
2371 static int
2372 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2373                               uint64_t last_item,
2374                               const struct rte_flow_item *gtp_item,
2375                               const struct rte_flow_attr *attr,
2376                               struct rte_flow_error *error)
2377 {
2378         const struct rte_flow_item_gtp *gtp_spec;
2379         const struct rte_flow_item_gtp *gtp_mask;
2380         const struct rte_flow_item_gtp_psc *mask;
2381         const struct rte_flow_item_gtp_psc nic_mask = {
2382                 .hdr.type = 0xF,
2383                 .hdr.qfi = 0x3F,
2384         };
2385
2386         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2387                 return rte_flow_error_set
2388                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2389                          "GTP PSC item must be preceded with GTP item");
2390         gtp_spec = gtp_item->spec;
2391         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2392         /* Reject a GTP spec that requests the E flag to match zero. */
2393         if (gtp_spec &&
2394                 (gtp_mask->v_pt_rsv_flags &
2395                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2396                 return rte_flow_error_set
2397                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2398                          "GTP E flag must be 1 to match GTP PSC");
2399         /* Check the flow is not created in group zero. */
2400         if (!attr->transfer && !attr->group)
2401                 return rte_flow_error_set
2402                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2403                          "GTP PSC is not supported for group 0");
2404         /* The GTP PSC spec is optional; nothing more to check without it. */
2405         if (!item->spec)
2406                 return 0;
2407         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2408         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2409                                          (const uint8_t *)&nic_mask,
2410                                          sizeof(struct rte_flow_item_gtp_psc),
2411                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2412 }
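
/*
 * Illustrative sketch (TEID and QFI values are arbitrary): a GTP + GTP PSC
 * pattern fragment accepted by the two validators above. 0x04 in
 * v_pt_rsv_flags is the E flag, which must be set (or left unmasked) for a
 * GTP PSC item to follow; the full pattern also needs an outer UDP layer
 * before GTP, and the flow must not target group 0.
 *
 *	struct rte_flow_item_gtp gtp_spec = {
 *		.v_pt_rsv_flags = 0x04,
 *		.teid = RTE_BE32(0x1234),
 *	};
 *	struct rte_flow_item_gtp gtp_mask = {
 *		.v_pt_rsv_flags = 0x04,
 *		.teid = RTE_BE32(0xffffffff),
 *	};
 *	struct rte_flow_item_gtp_psc psc_spec = {
 *		.hdr.qfi = 9,
 *	};
 *	struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_GTP,
 *		  .spec = &gtp_spec, .mask = &gtp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_GTP_PSC, .spec = &psc_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */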
2413
2414 /**
2415  * Validate IPV4 item.
2416  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2417  * add specific validation of the fragment_offset field.
2418  *
2419  * @param[in] dev
2420  *   Pointer to the rte_eth_dev structure.
2421  * @param[in] item
2422  *   Item specification.
2423  * @param[in] item_flags
2424  *   Bit-fields that hold the items detected until now.
2425  * @param[in] last_item
2426  *   Previously validated item in the pattern items.
2427  * @param[in] ether_type
2428  *   Type in the ethernet layer header (including dot1q).
2429  * @param[out] error
2430  *   Pointer to error structure.
2431  *
2432  * @return
2433  *   0 on success, a negative errno value otherwise and rte_errno is set.
2434  */
2429 static int
2430 flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
2431                            const struct rte_flow_item *item,
2432                            uint64_t item_flags, uint64_t last_item,
2433                            uint16_t ether_type, struct rte_flow_error *error)
2434 {
2435         int ret;
2436         struct mlx5_priv *priv = dev->data->dev_private;
2437         struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
2438         const struct rte_flow_item_ipv4 *spec = item->spec;
2439         const struct rte_flow_item_ipv4 *last = item->last;
2440         const struct rte_flow_item_ipv4 *mask = item->mask;
2441         rte_be16_t fragment_offset_spec = 0;
2442         rte_be16_t fragment_offset_last = 0;
2443         struct rte_flow_item_ipv4 nic_ipv4_mask = {
2444                 .hdr = {
2445                         .src_addr = RTE_BE32(0xffffffff),
2446                         .dst_addr = RTE_BE32(0xffffffff),
2447                         .type_of_service = 0xff,
2448                         .fragment_offset = RTE_BE16(0xffff),
2449                         .next_proto_id = 0xff,
2450                         .time_to_live = 0xff,
2451                 },
2452         };
2453
2454         if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
2455                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2456                 bool ihl_cap = !tunnel ?
2457                                attr->outer_ipv4_ihl : attr->inner_ipv4_ihl;
2458                 if (!ihl_cap)
2459                         return rte_flow_error_set(error, ENOTSUP,
2460                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2461                                                   item,
2462                                                   "IPV4 ihl offload not supported");
2463                 nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
2464         }
2465         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2466                                            ether_type, &nic_ipv4_mask,
2467                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2468         if (ret < 0)
2469                 return ret;
2470         if (spec && mask)
2471                 fragment_offset_spec = spec->hdr.fragment_offset &
2472                                        mask->hdr.fragment_offset;
2473         if (!fragment_offset_spec)
2474                 return 0;
2475         /*
2476          * spec and mask are valid, enforce using full mask to make sure the
2477          * complete value is used correctly.
2478          */
2479         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2480                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2481                 return rte_flow_error_set(error, EINVAL,
2482                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2483                                           item, "must use full mask for"
2484                                           " fragment_offset");
2485         /*
2486          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2487          * indicating this is the first fragment of a fragmented packet.
2488          * This is not yet supported in MLX5, return appropriate error message.
2489          */
2490         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2491                 return rte_flow_error_set(error, ENOTSUP,
2492                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2493                                           "match on first fragment not "
2494                                           "supported");
2495         if (fragment_offset_spec && !last)
2496                 return rte_flow_error_set(error, ENOTSUP,
2497                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2498                                           "specified value not supported");
2499         /* spec and last are valid, validate the specified range. */
2500         fragment_offset_last = last->hdr.fragment_offset &
2501                                mask->hdr.fragment_offset;
2502         /*
2503          * Match on fragment_offset spec 0x2001 and last 0x3fff
2504          * means MF is 1 and frag-offset is > 0.
2505          * This packet is the 2nd or a later fragment, excluding the last.
2506          * This is not yet supported in MLX5, return appropriate
2507          * error message.
2508          */
2509         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2510             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2511                 return rte_flow_error_set(error, ENOTSUP,
2512                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2513                                           last, "match on following "
2514                                           "fragments not supported");
2515         /*
2516          * Match on fragment_offset spec 0x0001 and last 0x1fff
2517          * means MF is 0 and frag-offset is > 0.
2518          * This packet is the last fragment of a fragmented packet.
2519          * This is not yet supported in MLX5, return appropriate
2520          * error message.
2521          */
2522         if (fragment_offset_spec == RTE_BE16(1) &&
2523             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2524                 return rte_flow_error_set(error, ENOTSUP,
2525                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2526                                           last, "match on last "
2527                                           "fragment not supported");
2528         /*
2529          * Match on fragment_offset spec 0x0001 and last 0x3fff
2530          * means MF and/or frag-offset is not 0.
2531          * This is a fragmented packet.
2532          * Other range values are invalid and rejected.
2533          */
2534         if (!(fragment_offset_spec == RTE_BE16(1) &&
2535               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2536                 return rte_flow_error_set(error, ENOTSUP,
2537                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2538                                           "specified range not supported");
2539         return 0;
2540 }
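
/*
 * Illustrative sketch, not part of the driver: a pattern fragment an
 * application could pass so that the validator above accepts it. With
 * spec 0x0001, last 0x3fff and a full fragment_offset mask, the range
 * covers every packet with MF set and/or a non-zero fragment offset,
 * i.e. "any fragment". Only the helper name is hypothetical; all other
 * names come from headers already included by this file.
 */
static __rte_unused void
example_ipv4_any_frag_pattern(struct rte_flow_item pattern[3])
{
	static const struct rte_flow_item_ipv4 spec = {
		.hdr.fragment_offset = RTE_BE16(1),
	};
	static const struct rte_flow_item_ipv4 last = {
		.hdr.fragment_offset = RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK |
						RTE_IPV4_HDR_MF_FLAG),
	};
	static const struct rte_flow_item_ipv4 mask = {
		.hdr.fragment_offset = RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK |
						RTE_IPV4_HDR_MF_FLAG),
	};

	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.spec = &spec,
		.last = &last,
		.mask = &mask,
	};
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}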
2541
2542 /**
2543  * Validate IPV6 fragment extension item.
2544  *
2545  * @param[in] item
2546  *   Item specification.
2547  * @param[in] item_flags
2548  *   Bit-fields that hold the items detected until now.
2549  * @param[out] error
2550  *   Pointer to error structure.
2551  *
2552  * @return
2553  *   0 on success, a negative errno value otherwise and rte_errno is set.
2554  */
2555 static int
2556 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2557                                     uint64_t item_flags,
2558                                     struct rte_flow_error *error)
2559 {
2560         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2561         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2562         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2563         rte_be16_t frag_data_spec = 0;
2564         rte_be16_t frag_data_last = 0;
2565         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2566         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2567                                       MLX5_FLOW_LAYER_OUTER_L4;
2568         int ret = 0;
2569         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2570                 .hdr = {
2571                         .next_header = 0xff,
2572                         .frag_data = RTE_BE16(0xffff),
2573                 },
2574         };
2575
2576         if (item_flags & l4m)
2577                 return rte_flow_error_set(error, EINVAL,
2578                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2579                                           "ipv6 fragment extension item cannot "
2580                                           "follow L4 item.");
2581         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2582             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2583                 return rte_flow_error_set(error, EINVAL,
2584                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2585                                           "ipv6 fragment extension item must "
2586                                           "follow ipv6 item");
2587         if (spec && mask)
2588                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2589         if (!frag_data_spec)
2590                 return 0;
2591         /*
2592          * spec and mask are valid, enforce using full mask to make sure the
2593          * complete value is used correctly.
2594          */
2595         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2596                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2597                 return rte_flow_error_set(error, EINVAL,
2598                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2599                                           item, "must use full mask for"
2600                                           " frag_data");
2601         /*
2602          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2603          * This is the first fragment of a fragmented packet.
2604          */
2605         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2606                 return rte_flow_error_set(error, ENOTSUP,
2607                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2608                                           "match on first fragment not "
2609                                           "supported");
2610         if (frag_data_spec && !last)
2611                 return rte_flow_error_set(error, EINVAL,
2612                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2613                                           "specified value not supported");
2614         ret = mlx5_flow_item_acceptable
2615                                 (item, (const uint8_t *)mask,
2616                                  (const uint8_t *)&nic_mask,
2617                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2618                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2619         if (ret)
2620                 return ret;
2621         /* spec and last are valid, validate the specified range. */
2622         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2623         /*
2624          * Match on frag_data spec 0x0009 and last 0xfff9
2625          * means M is 1 and frag-offset is > 0.
2626          * This packet is the 2nd or a later fragment, excluding the last.
2627          * This is not yet supported in MLX5, return appropriate
2628          * error message.
2629          */
2630         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2631                                        RTE_IPV6_EHDR_MF_MASK) &&
2632             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2633                 return rte_flow_error_set(error, ENOTSUP,
2634                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2635                                           last, "match on following "
2636                                           "fragments not supported");
2637         /*
2638          * Match on frag_data spec 0x0008 and last 0xfff8
2639          * means M is 0 and frag-offset is > 0.
2640          * This packet is the last fragment of a fragmented packet.
2641          * This is not yet supported in MLX5, return appropriate
2642          * error message.
2643          */
2644         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2645             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2646                 return rte_flow_error_set(error, ENOTSUP,
2647                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2648                                           last, "match on last "
2649                                           "fragment not supported");
2650         /* Other range values are invalid and rejected. */
2651         return rte_flow_error_set(error, EINVAL,
2652                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2653                                   "specified range not supported");
2654 }
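
/*
 * Illustrative sketch, not part of the driver: as the checks above show,
 * every spec/last range on frag_data is currently rejected, so the only
 * accepted use of this item is matching the mere presence of the IPv6
 * fragment extension header, i.e. "any fragment". The helper name is
 * hypothetical.
 */
static __rte_unused void
example_ipv6_frag_present_pattern(struct rte_flow_item pattern[3])
{
	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	pattern[1] = (struct rte_flow_item){
		/* No spec/mask: match only the header presence. */
		.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
	};
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}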
2655
2656 /**
2657  * Validate ASO CT item.
2658  *
2659  * @param[in] dev
2660  *   Pointer to the rte_eth_dev structure.
2661  * @param[in] item
2662  *   Item specification.
2663  * @param[in] item_flags
2664  *   Pointer to bit-fields that hold the items detected until now.
2665  * @param[out] error
2666  *   Pointer to error structure.
2667  *
2668  * @return
2669  *   0 on success, a negative errno value otherwise and rte_errno is set.
2670  */
2671 static int
2672 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2673                              const struct rte_flow_item *item,
2674                              uint64_t *item_flags,
2675                              struct rte_flow_error *error)
2676 {
2677         const struct rte_flow_item_conntrack *spec = item->spec;
2678         const struct rte_flow_item_conntrack *mask = item->mask;
2679         uint32_t flags;
2680         RTE_SET_USED(dev);
2681
2682         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2683                 return rte_flow_error_set(error, EINVAL,
2684                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2685                                           "Only one CT is supported");
2686         if (!mask)
2687                 mask = &rte_flow_item_conntrack_mask;
2688         flags = spec ? (spec->flags & mask->flags) : 0;
2689         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2690             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2691              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2692              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2693                 return rte_flow_error_set(error, EINVAL,
2694                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2695                                           "Conflicting status bits");
2696         /* State change also needs to be considered. */
2697         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2698         return 0;
2699 }
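
/*
 * Illustrative sketch, not part of the driver: a CONNTRACK item that
 * passes the conflict check above by asking only for the "valid" packet
 * state; combining VALID with INVALID, BAD or DISABLED in one spec is
 * what gets rejected. The helper name is hypothetical.
 */
static __rte_unused void
example_conntrack_item(struct rte_flow_item *item)
{
	static const struct rte_flow_item_conntrack spec = {
		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
	};
	static const struct rte_flow_item_conntrack mask = {
		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
			 RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
			 RTE_FLOW_CONNTRACK_PKT_STATE_BAD |
			 RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED,
	};

	item->type = RTE_FLOW_ITEM_TYPE_CONNTRACK;
	item->spec = &spec;
	item->last = NULL;
	item->mask = &mask;
}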
2700
2701 /**
2702  * Validate the pop VLAN action.
2703  *
2704  * @param[in] dev
2705  *   Pointer to the rte_eth_dev structure.
2706  * @param[in] action_flags
2707  *   Holds the actions detected until now.
2708  * @param[in] action
2709  *   Pointer to the pop vlan action.
2710  * @param[in] item_flags
2711  *   The items found in this flow rule.
2712  * @param[in] attr
2713  *   Pointer to flow attributes.
2714  * @param[out] error
2715  *   Pointer to error structure.
2716  *
2717  * @return
2718  *   0 on success, a negative errno value otherwise and rte_errno is set.
2719  */
2720 static int
2721 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2722                                  uint64_t action_flags,
2723                                  const struct rte_flow_action *action,
2724                                  uint64_t item_flags,
2725                                  const struct rte_flow_attr *attr,
2726                                  struct rte_flow_error *error)
2727 {
2728         const struct mlx5_priv *priv = dev->data->dev_private;
2729         struct mlx5_dev_ctx_shared *sh = priv->sh;
2730         bool direction_error = false;
2731
2732         if (!sh->pop_vlan_action)
2733                 return rte_flow_error_set(error, ENOTSUP,
2734                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2735                                           NULL,
2736                                           "pop vlan action is not supported");
2737         /* Pop VLAN on NIC egress is unsupported; FDB TX needs the CX6 steering format. */
2738         if (attr->transfer) {
2739                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2740                 bool is_cx5 = sh->steering_format_version ==
2741                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2742
2743                 if (fdb_tx && is_cx5)
2744                         direction_error = true;
2745         } else if (attr->egress) {
2746                 direction_error = true;
2747         }
2748         if (direction_error)
2749                 return rte_flow_error_set(error, ENOTSUP,
2750                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2751                                           NULL,
2752                                           "pop vlan action not supported for egress");
2753         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2754                 return rte_flow_error_set(error, ENOTSUP,
2755                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2756                                           "no support for multiple VLAN "
2757                                           "actions");
2758         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2759         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2760             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2761                 return rte_flow_error_set(error, ENOTSUP,
2762                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2763                                           NULL,
2764                                           "cannot pop vlan after decap without "
2765                                           "match on inner vlan in the flow");
2766         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2767         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2768             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2769                 return rte_flow_error_set(error, ENOTSUP,
2770                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2771                                           NULL,
2772                                           "cannot pop vlan without a "
2773                                           "match on (outer) vlan in the flow");
2774         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2775                 return rte_flow_error_set(error, EINVAL,
2776                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2777                                           "wrong action order, port_id should "
2778                                           "be after pop VLAN action");
2779         if (!attr->transfer && priv->representor)
2780                 return rte_flow_error_set(error, ENOTSUP,
2781                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2782                                           "pop vlan action for VF representor "
2783                                           "not supported on NIC table");
2784         return 0;
2785 }
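
/*
 * Illustrative sketch, not part of the driver: since no decap precedes
 * the pop here, the validator above insists on an outer VLAN match in
 * the same rule, so a minimal ingress pop-VLAN rule looks like this.
 * The helper name is hypothetical.
 */
static __rte_unused void
example_pop_vlan_rule(struct rte_flow_item pattern[3],
		      struct rte_flow_action actions[3],
		      const struct rte_flow_action_queue *queue)
{
	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
	};
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
		.conf = queue,
	};
	actions[2] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END };
}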
2786
2787 /**
2788  * Get VLAN default info from vlan match info.
2789  *
2790  * @param[in] items
2791  *   The list of item specifications.
2792  * @param[out] vlan
2793  *   Pointer to the VLAN info to fill.
2797  */
2798 static void
2799 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2800                                   struct rte_vlan_hdr *vlan)
2801 {
2802         const struct rte_flow_item_vlan nic_mask = {
2803                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2804                                 MLX5DV_FLOW_VLAN_VID_MASK),
2805                 .inner_type = RTE_BE16(0xffff),
2806         };
2807
2808         if (items == NULL)
2809                 return;
2810         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2811                 int type = items->type;
2812
2813                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2814                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2815                         break;
2816         }
2817         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2818                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2819                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2820
2821                 /* If VLAN item in pattern doesn't contain data, return here. */
2822                 if (!vlan_v)
2823                         return;
2824                 if (!vlan_m)
2825                         vlan_m = &nic_mask;
2826                 /* Only full match values are accepted */
2827                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2828                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2829                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2830                         vlan->vlan_tci |=
2831                                 rte_be_to_cpu_16(vlan_v->tci &
2832                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2833                 }
2834                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2835                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2836                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2837                         vlan->vlan_tci |=
2838                                 rte_be_to_cpu_16(vlan_v->tci &
2839                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2840                 }
2841                 if (vlan_m->inner_type == nic_mask.inner_type)
2842                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2843                                                            vlan_m->inner_type);
2844         }
2845 }
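
/*
 * Illustrative sketch, not part of the driver: the TCI layout the
 * function above relies on. PCP sits in the top three bits (shift 13)
 * and VID in the low twelve, so TCI 0x6064 decomposes to PCP 3 and
 * VID 100 (0x064). The helper name is hypothetical.
 */
static __rte_unused void
example_tci_split(rte_be16_t tci_be, uint8_t *pcp, uint16_t *vid)
{
	uint16_t tci = rte_be_to_cpu_16(tci_be);

	*pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >> MLX5DV_FLOW_VLAN_PCP_SHIFT;
	*vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;
}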
2846
2847 /**
2848  * Validate the push VLAN action.
2849  *
2850  * @param[in] dev
2851  *   Pointer to the rte_eth_dev structure.
2852  * @param[in] action_flags
2853  *   Holds the actions detected until now.
2854  * @param[in] vlan_m
2855  *   Pointer to the VLAN item mask from the pattern, or NULL.
2856  * @param[in] action
2857  *   Pointer to the action structure.
2858  * @param[in] attr
2859  *   Pointer to flow attributes
2860  * @param[out] error
2861  *   Pointer to error structure.
2862  *
2863  * @return
2864  *   0 on success, a negative errno value otherwise and rte_errno is set.
2865  */
2866 static int
2867 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2868                                   uint64_t action_flags,
2869                                   const struct rte_flow_item_vlan *vlan_m,
2870                                   const struct rte_flow_action *action,
2871                                   const struct rte_flow_attr *attr,
2872                                   struct rte_flow_error *error)
2873 {
2874         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2875         const struct mlx5_priv *priv = dev->data->dev_private;
2876
2877         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2878             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2879                 return rte_flow_error_set(error, EINVAL,
2880                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2881                                           "invalid vlan ethertype");
2882         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2883                 return rte_flow_error_set(error, EINVAL,
2884                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2885                                           "wrong action order, port_id should "
2886                                           "be after push VLAN");
2887         if (!attr->transfer && priv->representor)
2888                 return rte_flow_error_set(error, ENOTSUP,
2889                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2890                                           "push vlan action for VF representor "
2891                                           "not supported on NIC table");
2892         if (vlan_m &&
2893             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2894             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2895                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2896             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2897             !(mlx5_flow_find_action
2898                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2899                 return rte_flow_error_set(error, EINVAL,
2900                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2901                                           "not full match mask on VLAN PCP and "
2902                                           "there is no of_set_vlan_pcp action, "
2903                                           "push VLAN action cannot figure out "
2904                                           "PCP value");
2905         if (vlan_m &&
2906             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2907             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2908                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2909             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2910             !(mlx5_flow_find_action
2911                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2912                 return rte_flow_error_set(error, EINVAL,
2913                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2914                                           "not full match mask on VLAN VID and "
2915                                           "there is no of_set_vlan_vid action, "
2916                                           "push VLAN action cannot figure out "
2917                                           "VID value");
2919         return 0;
2920 }
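
/*
 * Illustrative sketch, not part of the driver: when the pattern does not
 * fully fix PCP and VID, the validator above demands explicit
 * OF_SET_VLAN_VID/OF_SET_VLAN_PCP actions after the push, e.g. the
 * sequence below. The helper name and the VID/PCP values are
 * hypothetical.
 */
static __rte_unused void
example_push_vlan_actions(struct rte_flow_action actions[4])
{
	static const struct rte_flow_action_of_push_vlan push = {
		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
	};
	static const struct rte_flow_action_of_set_vlan_vid vid = {
		.vlan_vid = RTE_BE16(100),
	};
	static const struct rte_flow_action_of_set_vlan_pcp pcp = {
		.vlan_pcp = 3,
	};

	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push,
	};
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid,
	};
	actions[2] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp,
	};
	actions[3] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END };
}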
2921
2922 /**
2923  * Validate the set VLAN PCP.
2924  *
2925  * @param[in] action_flags
2926  *   Holds the actions detected until now.
2927  * @param[in] actions
2928  *   Pointer to the list of actions remaining in the flow rule.
2929  * @param[out] error
2930  *   Pointer to error structure.
2931  *
2932  * @return
2933  *   0 on success, a negative errno value otherwise and rte_errno is set.
2934  */
2935 static int
2936 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2937                                      const struct rte_flow_action actions[],
2938                                      struct rte_flow_error *error)
2939 {
2940         const struct rte_flow_action *action = actions;
2941         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2942
2943         if (conf->vlan_pcp > 7)
2944                 return rte_flow_error_set(error, EINVAL,
2945                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2946                                           "VLAN PCP value is too big");
2947         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2948                 return rte_flow_error_set(error, ENOTSUP,
2949                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2950                                           "set VLAN PCP action must follow "
2951                                           "the push VLAN action");
2952         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2953                 return rte_flow_error_set(error, ENOTSUP,
2954                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2955                                           "Multiple VLAN PCP modification are "
2956                                           "not supported");
2957         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2958                 return rte_flow_error_set(error, EINVAL,
2959                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2960                                           "wrong action order, port_id should "
2961                                           "be after set VLAN PCP");
2962         return 0;
2963 }
2964
2965 /**
2966  * Validate the set VLAN VID.
2967  *
2968  * @param[in] item_flags
2969  *   Holds the items detected in this rule.
2970  * @param[in] action_flags
2971  *   Holds the actions detected until now.
2972  * @param[in] actions
2973  *   Pointer to the list of actions remaining in the flow rule.
2974  * @param[out] error
2975  *   Pointer to error structure.
2976  *
2977  * @return
2978  *   0 on success, a negative errno value otherwise and rte_errno is set.
2979  */
2980 static int
2981 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2982                                      uint64_t action_flags,
2983                                      const struct rte_flow_action actions[],
2984                                      struct rte_flow_error *error)
2985 {
2986         const struct rte_flow_action *action = actions;
2987         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2988
2989         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2990                 return rte_flow_error_set(error, EINVAL,
2991                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2992                                           "VLAN VID value is too big");
2993         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2994             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2995                 return rte_flow_error_set(error, ENOTSUP,
2996                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2997                                           "set VLAN VID action must follow push"
2998                                           " VLAN action or match on VLAN item");
2999         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3000                 return rte_flow_error_set(error, ENOTSUP,
3001                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3002                                           "Multiple VLAN VID modifications are "
3003                                           "not supported");
3004         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3005                 return rte_flow_error_set(error, EINVAL,
3006                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3007                                           "wrong action order, port_id should "
3008                                           "be after set VLAN VID");
3009         return 0;
3010 }
3011
3012 /**
3013  * Validate the FLAG action.
3014  *
3015  * @param[in] dev
3016  *   Pointer to the rte_eth_dev structure.
3017  * @param[in] action_flags
3018  *   Holds the actions detected until now.
3019  * @param[in] attr
3020  *   Pointer to flow attributes
3021  * @param[out] error
3022  *   Pointer to error structure.
3023  *
3024  * @return
3025  *   0 on success, a negative errno value otherwise and rte_errno is set.
3026  */
3027 static int
3028 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3029                              uint64_t action_flags,
3030                              const struct rte_flow_attr *attr,
3031                              struct rte_flow_error *error)
3032 {
3033         struct mlx5_priv *priv = dev->data->dev_private;
3034         struct mlx5_sh_config *config = &priv->sh->config;
3035         int ret;
3036
3037         /* Fall back if no extended metadata register support. */
3038         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3039                 return mlx5_flow_validate_action_flag(action_flags, attr,
3040                                                       error);
3041         /* Extensive metadata mode requires registers. */
3042         if (!mlx5_flow_ext_mreg_supported(dev))
3043                 return rte_flow_error_set(error, ENOTSUP,
3044                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3045                                           "no metadata registers "
3046                                           "to support flag action");
3047         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3048                 return rte_flow_error_set(error, ENOTSUP,
3049                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3050                                           "extended metadata register"
3051                                           " isn't available");
3052         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3053         if (ret < 0)
3054                 return ret;
3055         MLX5_ASSERT(ret > 0);
3056         if (action_flags & MLX5_FLOW_ACTION_MARK)
3057                 return rte_flow_error_set(error, EINVAL,
3058                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3059                                           "can't mark and flag in same flow");
3060         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3061                 return rte_flow_error_set(error, EINVAL,
3062                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3063                                           "can't have 2 flag"
3064                                           " actions in same flow");
3065         return 0;
3066 }
3067
3068 /**
3069  * Validate MARK action.
3070  *
3071  * @param[in] dev
3072  *   Pointer to the rte_eth_dev structure.
3073  * @param[in] action
3074  *   Pointer to action.
3075  * @param[in] action_flags
3076  *   Holds the actions detected until now.
3077  * @param[in] attr
3078  *   Pointer to flow attributes
3079  * @param[out] error
3080  *   Pointer to error structure.
3081  *
3082  * @return
3083  *   0 on success, a negative errno value otherwise and rte_errno is set.
3084  */
3085 static int
3086 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3087                              const struct rte_flow_action *action,
3088                              uint64_t action_flags,
3089                              const struct rte_flow_attr *attr,
3090                              struct rte_flow_error *error)
3091 {
3092         struct mlx5_priv *priv = dev->data->dev_private;
3093         struct mlx5_sh_config *config = &priv->sh->config;
3094         const struct rte_flow_action_mark *mark = action->conf;
3095         int ret;
3096
3097         if (is_tunnel_offload_active(dev))
3098                 return rte_flow_error_set(error, ENOTSUP,
3099                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3100                                           "no mark action "
3101                                           "if tunnel offload active");
3102         /* Fall back if no extended metadata register support. */
3103         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3104                 return mlx5_flow_validate_action_mark(action, action_flags,
3105                                                       attr, error);
3106         /* Extensive metadata mode requires registers. */
3107         if (!mlx5_flow_ext_mreg_supported(dev))
3108                 return rte_flow_error_set(error, ENOTSUP,
3109                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3110                                           "no metadata registers "
3111                                           "to support mark action");
3112         if (!priv->sh->dv_mark_mask)
3113                 return rte_flow_error_set(error, ENOTSUP,
3114                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3115                                           "extended metadata register"
3116                                           " isn't available");
3117         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3118         if (ret < 0)
3119                 return ret;
3120         MLX5_ASSERT(ret > 0);
3121         if (!mark)
3122                 return rte_flow_error_set(error, EINVAL,
3123                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3124                                           "configuration cannot be null");
3125         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3126                 return rte_flow_error_set(error, EINVAL,
3127                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3128                                           &mark->id,
3129                                           "mark id exceeds the limit");
3130         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3131                 return rte_flow_error_set(error, EINVAL,
3132                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3133                                           "can't flag and mark in same flow");
3134         if (action_flags & MLX5_FLOW_ACTION_MARK)
3135                 return rte_flow_error_set(error, EINVAL,
3136                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3137                                           "can't have 2 mark actions in same"
3138                                           " flow");
3139         return 0;
3140 }
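
/*
 * Illustrative sketch, not part of the driver: MARK and FLAG are
 * mutually exclusive per the validators above. A rule carrying the
 * action below tags matching packets with a mark id the application
 * reads back from mbuf->hash.fdir.hi on receive. The helper name and
 * id value are hypothetical.
 */
static __rte_unused void
example_mark_action(struct rte_flow_action *action)
{
	static const struct rte_flow_action_mark mark = { .id = 0xbeef };

	action->type = RTE_FLOW_ACTION_TYPE_MARK;
	action->conf = &mark;
}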
3141
3142 /**
3143  * Validate SET_META action.
3144  *
3145  * @param[in] dev
3146  *   Pointer to the rte_eth_dev structure.
3147  * @param[in] action
3148  *   Pointer to the action structure.
3149  * @param[in] action_flags
3150  *   Holds the actions detected until now.
3151  * @param[in] attr
3152  *   Pointer to flow attributes
3153  * @param[out] error
3154  *   Pointer to error structure.
3155  *
3156  * @return
3157  *   0 on success, a negative errno value otherwise and rte_errno is set.
3158  */
3159 static int
3160 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3161                                  const struct rte_flow_action *action,
3162                                  uint64_t action_flags __rte_unused,
3163                                  const struct rte_flow_attr *attr,
3164                                  struct rte_flow_error *error)
3165 {
3166         struct mlx5_priv *priv = dev->data->dev_private;
3167         struct mlx5_sh_config *config = &priv->sh->config;
3168         const struct rte_flow_action_set_meta *conf;
3169         uint32_t nic_mask = UINT32_MAX;
3170         int reg;
3171
3172         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
3173             !mlx5_flow_ext_mreg_supported(dev))
3174                 return rte_flow_error_set(error, ENOTSUP,
3175                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3176                                           "extended metadata register"
3177                                           " isn't supported");
3178         reg = flow_dv_get_metadata_reg(dev, attr, error);
3179         if (reg < 0)
3180                 return reg;
3181         if (reg == REG_NON)
3182                 return rte_flow_error_set(error, ENOTSUP,
3183                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3184                                           "unavailable extended metadata register");
3185         if (reg != REG_A && reg != REG_B)
3186                 nic_mask = priv->sh->dv_meta_mask;
3190         if (!(action->conf))
3191                 return rte_flow_error_set(error, EINVAL,
3192                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3193                                           "configuration cannot be null");
3194         conf = (const struct rte_flow_action_set_meta *)action->conf;
3195         if (!conf->mask)
3196                 return rte_flow_error_set(error, EINVAL,
3197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3198                                           "zero mask doesn't have any effect");
3199         if (conf->mask & ~nic_mask)
3200                 return rte_flow_error_set(error, EINVAL,
3201                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3202                                           "metadata must be within reg C0");
3203         return 0;
3204 }
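
/*
 * Illustrative sketch, not part of the driver: a SET_META action with
 * the non-zero mask the validator above requires; only the bits set in
 * the mask are written to the metadata register. The helper name and
 * values are hypothetical.
 */
static __rte_unused void
example_set_meta_action(struct rte_flow_action *action)
{
	static const struct rte_flow_action_set_meta meta = {
		.data = 0x1234,
		.mask = 0xffff, /* Update only the low 16 bits. */
	};

	action->type = RTE_FLOW_ACTION_TYPE_SET_META;
	action->conf = &meta;
}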
3205
3206 /**
3207  * Validate SET_TAG action.
3208  *
3209  * @param[in] dev
3210  *   Pointer to the rte_eth_dev structure.
3211  * @param[in] action
3212  *   Pointer to the action structure.
3213  * @param[in] action_flags
3214  *   Holds the actions detected until now.
3215  * @param[in] attr
3216  *   Pointer to flow attributes
3217  * @param[out] error
3218  *   Pointer to error structure.
3219  *
3220  * @return
3221  *   0 on success, a negative errno value otherwise and rte_errno is set.
3222  */
3223 static int
3224 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3225                                 const struct rte_flow_action *action,
3226                                 uint64_t action_flags,
3227                                 const struct rte_flow_attr *attr,
3228                                 struct rte_flow_error *error)
3229 {
3230         const struct rte_flow_action_set_tag *conf;
3231         const uint64_t terminal_action_flags =
3232                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3233                 MLX5_FLOW_ACTION_RSS;
3234         int ret;
3235
3236         if (!mlx5_flow_ext_mreg_supported(dev))
3237                 return rte_flow_error_set(error, ENOTSUP,
3238                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3239                                           "extensive metadata register"
3240                                           " isn't supported");
3241         if (!(action->conf))
3242                 return rte_flow_error_set(error, EINVAL,
3243                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3244                                           "configuration cannot be null");
3245         conf = (const struct rte_flow_action_set_tag *)action->conf;
3246         if (!conf->mask)
3247                 return rte_flow_error_set(error, EINVAL,
3248                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3249                                           "zero mask doesn't have any effect");
3250         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3251         if (ret < 0)
3252                 return ret;
3253         if (!attr->transfer && attr->ingress &&
3254             (action_flags & terminal_action_flags))
3255                 return rte_flow_error_set(error, EINVAL,
3256                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3257                                           "set_tag has no effect"
3258                                           " with terminal actions");
3259         return 0;
3260 }
3261
3262 /**
3263  * Indicates whether ASO aging is supported.
3264  *
3265  * @param[in] sh
3266  *   Pointer to shared device context structure.
3267  * @param[in] attr
3268  *   Attributes of flow that includes AGE action.
3269  *
3270  * @return
3271  *   True when ASO aging is supported, false otherwise.
3272  */
3273 static inline bool
3274 flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,
3275                 const struct rte_flow_attr *attr)
3276 {
3277         MLX5_ASSERT(sh && attr);
3278         return (sh->flow_hit_aso_en && (attr->transfer || attr->group));
3279 }
3280
3281 /**
3282  * Validate count action.
3283  *
3284  * @param[in] dev
3285  *   Pointer to rte_eth_dev structure.
3286  * @param[in] shared
3287  *   Indicator whether the action is shared.
3288  * @param[in] action_flags
3289  *   Holds the actions detected until now.
3290  * @param[in] attr
3291  *   Attributes of flow that includes this action.
3292  * @param[out] error
3293  *   Pointer to error structure.
3294  *
3295  * @return
3296  *   0 on success, a negative errno value otherwise and rte_errno is set.
3297  */
3298 static int
3299 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3300                               uint64_t action_flags,
3301                               const struct rte_flow_attr *attr,
3302                               struct rte_flow_error *error)
3303 {
3304         struct mlx5_priv *priv = dev->data->dev_private;
3305
3306         if (!priv->sh->cdev->config.devx)
3307                 goto notsup_err;
3308         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3309                 return rte_flow_error_set(error, EINVAL,
3310                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3311                                           "duplicate count actions set");
3312         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3313             !flow_hit_aso_supported(priv->sh, attr))
3314                 return rte_flow_error_set(error, EINVAL,
3315                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3316                                           "old age and indirect count combination is not supported");
3317 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3318         return 0;
3319 #endif
3320 notsup_err:
3321         return rte_flow_error_set
3322                       (error, ENOTSUP,
3323                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3324                        NULL,
3325                        "count action not supported");
3326 }
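
/*
 * Illustrative sketch, not part of the driver: application-side readout
 * of a rule that carries a COUNT action, using the generic
 * rte_flow_query() API with a rte_flow_query_count buffer. The helper
 * name is hypothetical.
 */
static __rte_unused int
example_read_counter(uint16_t port_id, struct rte_flow *flow,
		     uint64_t *hits, uint64_t *bytes)
{
	static const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count query = { .reset = 0 };
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_query(port_id, flow, &count_action, &query, &error);
	if (ret)
		return ret;
	*hits = query.hits_set ? query.hits : 0;
	*bytes = query.bytes_set ? query.bytes : 0;
	return 0;
}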
3327
3328 /**
3329  * Validate the L2 encap action.
3330  *
3331  * @param[in] dev
3332  *   Pointer to the rte_eth_dev structure.
3333  * @param[in] action_flags
3334  *   Holds the actions detected until now.
3335  * @param[in] action
3336  *   Pointer to the action structure.
3337  * @param[in] attr
3338  *   Pointer to flow attributes.
3339  * @param[out] error
3340  *   Pointer to error structure.
3341  *
3342  * @return
3343  *   0 on success, a negative errno value otherwise and rte_errno is set.
3344  */
3345 static int
3346 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3347                                  uint64_t action_flags,
3348                                  const struct rte_flow_action *action,
3349                                  const struct rte_flow_attr *attr,
3350                                  struct rte_flow_error *error)
3351 {
3352         const struct mlx5_priv *priv = dev->data->dev_private;
3353
3354         if (!(action->conf))
3355                 return rte_flow_error_set(error, EINVAL,
3356                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3357                                           "configuration cannot be null");
3358         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3359                 return rte_flow_error_set(error, EINVAL,
3360                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3361                                           "can only have a single encap action "
3362                                           "in a flow");
3363         if (!attr->transfer && priv->representor)
3364                 return rte_flow_error_set(error, ENOTSUP,
3365                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3366                                           "encap action for VF representor "
3367                                           "not supported on NIC table");
3368         return 0;
3369 }
3370
3371 /**
3372  * Validate a decap action.
3373  *
3374  * @param[in] dev
3375  *   Pointer to the rte_eth_dev structure.
3376  * @param[in] action_flags
3377  *   Holds the actions detected until now.
3378  * @param[in] action
3379  *   Pointer to the action structure.
3380  * @param[in] item_flags
3381  *   Holds the items detected.
3382  * @param[in] attr
3383  *   Pointer to flow attributes
3384  * @param[out] error
3385  *   Pointer to error structure.
3386  *
3387  * @return
3388  *   0 on success, a negative errno value otherwise and rte_errno is set.
3389  */
3390 static int
3391 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3392                               uint64_t action_flags,
3393                               const struct rte_flow_action *action,
3394                               const uint64_t item_flags,
3395                               const struct rte_flow_attr *attr,
3396                               struct rte_flow_error *error)
3397 {
3398         const struct mlx5_priv *priv = dev->data->dev_private;
3399
3400         if (priv->sh->cdev->config.hca_attr.scatter_fcs_w_decap_disable &&
3401             !priv->sh->config.decap_en)
3402                 return rte_flow_error_set(error, ENOTSUP,
3403                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3404                                           "decap is not enabled");
3405         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3406                 return rte_flow_error_set(error, ENOTSUP,
3407                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3408                                           action_flags &
3409                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3410                                           "have a single decap action" : "decap "
3411                                           "after encap is not supported");
3412         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3413                 return rte_flow_error_set(error, EINVAL,
3414                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3415                                           "can't have decap action after"
3416                                           " modify action");
3417         if (attr->egress)
3418                 return rte_flow_error_set(error, ENOTSUP,
3419                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3420                                           NULL,
3421                                           "decap action not supported for "
3422                                           "egress");
3423         if (!attr->transfer && priv->representor)
3424                 return rte_flow_error_set(error, ENOTSUP,
3425                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3426                                           "decap action for VF representor "
3427                                           "not supported on NIC table");
3428         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3429             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3430                 return rte_flow_error_set(error, ENOTSUP,
3431                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3432                                 "VXLAN item should be present for VXLAN decap");
3433         return 0;
3434 }
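
/*
 * Illustrative sketch, not part of the driver: VXLAN_DECAP is accepted
 * only when the pattern proves the packet is VXLAN, hence the item
 * check above. A complete rule would append a fate action (e.g. QUEUE)
 * before END. The helper name is hypothetical.
 */
static __rte_unused void
example_vxlan_decap(struct rte_flow_item pattern[5],
		    struct rte_flow_action actions[2])
{
	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_UDP };
	pattern[3] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VXLAN };
	pattern[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
	};
	actions[1] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END };
}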
3435
3436 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3437
3438 /**
3439  * Validate the raw encap and decap actions.
3440  *
3441  * @param[in] dev
3442  *   Pointer to the rte_eth_dev structure.
3443  * @param[in] decap
3444  *   Pointer to the decap action.
3445  * @param[in] encap
3446  *   Pointer to the encap action.
3447  * @param[in] attr
3448  *   Pointer to flow attributes
3449  * @param[in, out] action_flags
3450  *   Holds the actions detected until now.
3451  * @param[out] actions_n
3452  *   Pointer to the actions counter.
3453  * @param[in] action
3454  *   Pointer to the action structure.
3455  * @param[in] item_flags
3456  *   Holds the items detected.
3457  * @param[out] error
3458  *   Pointer to error structure.
3459  *
3460  * @return
3461  *   0 on success, a negative errno value otherwise and rte_errno is set.
3462  */
3463 static int
3464 flow_dv_validate_action_raw_encap_decap
3465         (struct rte_eth_dev *dev,
3466          const struct rte_flow_action_raw_decap *decap,
3467          const struct rte_flow_action_raw_encap *encap,
3468          const struct rte_flow_attr *attr, uint64_t *action_flags,
3469          int *actions_n, const struct rte_flow_action *action,
3470          uint64_t item_flags, struct rte_flow_error *error)
3471 {
3472         const struct mlx5_priv *priv = dev->data->dev_private;
3473         int ret;
3474
3475         if (encap && (!encap->size || !encap->data))
3476                 return rte_flow_error_set(error, EINVAL,
3477                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3478                                           "raw encap data cannot be empty");
3479         if (decap && encap) {
3480                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3481                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3482                         /* L3 encap. */
3483                         decap = NULL;
3484                 else if (encap->size <=
3485                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3486                            decap->size >
3487                            MLX5_ENCAPSULATION_DECISION_SIZE)
3488                         /* L3 decap. */
3489                         encap = NULL;
3490                 else if (encap->size >
3491                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3492                            decap->size >
3493                            MLX5_ENCAPSULATION_DECISION_SIZE)
3494                         /* 2 L2 actions: encap and decap. */
3495                         ;
3496                 else
3497                         return rte_flow_error_set(error,
3498                                 ENOTSUP,
3499                                 RTE_FLOW_ERROR_TYPE_ACTION,
3500                                 NULL, "unsupported combination: "
3501                                 "both raw decap size and raw "
3502                                 "encap size are too small");
3503         }
3504         if (decap) {
3505                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3506                                                     item_flags, attr, error);
3507                 if (ret < 0)
3508                         return ret;
3509                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3510                 ++(*actions_n);
3511         }
3512         if (encap) {
3513                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3514                         return rte_flow_error_set(error, ENOTSUP,
3515                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3516                                                   NULL,
3517                                                   "small raw encap size");
3518                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3519                         return rte_flow_error_set(error, EINVAL,
3520                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3521                                                   NULL,
3522                                                   "more than one encap action");
3523                 if (!attr->transfer && priv->representor)
3524                         return rte_flow_error_set
3525                                         (error, ENOTSUP,
3526                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3527                                          "encap action for VF representor "
3528                                          "not supported on NIC table");
3529                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3530                 ++(*actions_n);
3531         }
3532         return 0;
3533 }
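
/*
 * Illustrative sketch, not part of the driver: per the size comparison
 * above, a RAW_DECAP longer than MLX5_ENCAPSULATION_DECISION_SIZE paired
 * with a short RAW_ENCAP is treated as an L3 decap: the outer headers up
 * to and including the tunnel are stripped and a fresh L2 is written.
 * The buffers are caller-provided placeholders and the helper name is
 * hypothetical; real header bytes must be exact.
 */
static __rte_unused void
example_raw_l3_decap(struct rte_flow_action actions[3],
		     struct rte_flow_action_raw_decap *decap,
		     uint8_t *outer_hdrs, size_t outer_len,
		     struct rte_flow_action_raw_encap *encap,
		     uint8_t *new_l2, size_t l2_len)
{
	decap->data = outer_hdrs;
	decap->size = outer_len; /* > MLX5_ENCAPSULATION_DECISION_SIZE. */
	encap->data = new_l2;
	encap->size = l2_len; /* <= MLX5_ENCAPSULATION_DECISION_SIZE. */
	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = decap,
	};
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = encap,
	};
	actions[2] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END };
}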
3534
3535 /**
3536  * Validate the ASO CT action.
3537  *
3538  * @param[in] dev
3539  *   Pointer to the rte_eth_dev structure.
3540  * @param[in] action_flags
3541  *   Holds the actions detected until now.
3542  * @param[in] item_flags
3543  *   The items found in this flow rule.
3544  * @param[in] attr
3545  *   Pointer to flow attributes.
3546  * @param[out] error
3547  *   Pointer to error structure.
3548  *
3549  * @return
3550  *   0 on success, a negative errno value otherwise and rte_errno is set.
3551  */
3552 static int
3553 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3554                                uint64_t action_flags,
3555                                uint64_t item_flags,
3556                                const struct rte_flow_attr *attr,
3557                                struct rte_flow_error *error)
3558 {
3559         RTE_SET_USED(dev);
3560
3561         if (attr->group == 0 && !attr->transfer)
3562                 return rte_flow_error_set(error, ENOTSUP,
3563                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3564                                           NULL,
3565                                           "Only non-root tables are supported");
3566         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3567                 return rte_flow_error_set(error, ENOTSUP,
3568                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3569                                           "CT cannot follow a fate action");
3570         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3571             (action_flags & MLX5_FLOW_ACTION_AGE))
3572                 return rte_flow_error_set(error, EINVAL,
3573                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3574                                           "Only one ASO action is supported");
3575         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3576                 return rte_flow_error_set(error, EINVAL,
3577                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3578                                           "Encap cannot exist before CT");
3579         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3580                 return rte_flow_error_set(error, EINVAL,
3581                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3582                                           "Not an outer TCP packet");
3583         return 0;
3584 }
3585
3586 int
3587 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3588                              struct mlx5_list_entry *entry, void *cb_ctx)
3589 {
3590         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3591         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3592         struct mlx5_flow_dv_encap_decap_resource *resource;
3593
3594         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3595                                 entry);
3596         if (resource->reformat_type == ctx_resource->reformat_type &&
3597             resource->ft_type == ctx_resource->ft_type &&
3598             resource->flags == ctx_resource->flags &&
3599             resource->size == ctx_resource->size &&
3600             !memcmp((const void *)resource->buf,
3601                     (const void *)ctx_resource->buf,
3602                     resource->size))
3603                 return 0;
3604         return -1;
3605 }
3606
3607 struct mlx5_list_entry *
3608 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3609 {
3610         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3611         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3612         struct mlx5dv_dr_domain *domain;
3613         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3614         struct mlx5_flow_dv_encap_decap_resource *resource;
3615         uint32_t idx;
3616         int ret;
3617
3618         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3619                 domain = sh->fdb_domain;
3620         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3621                 domain = sh->rx_domain;
3622         else
3623                 domain = sh->tx_domain;
3624         /* Register new encap/decap resource. */
3625         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3626         if (!resource) {
3627                 rte_flow_error_set(ctx->error, ENOMEM,
3628                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3629                                    "cannot allocate resource memory");
3630                 return NULL;
3631         }
3632         *resource = *ctx_resource;
3633         resource->idx = idx;
3634         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
3635                                                               domain, resource,
3636                                                              &resource->action);
3637         if (ret) {
3638                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3639                 rte_flow_error_set(ctx->error, ENOMEM,
3640                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3641                                    NULL, "cannot create action");
3642                 return NULL;
3643         }
3644
3645         return &resource->entry;
3646 }
3647
3648 struct mlx5_list_entry *
3649 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3650                              void *cb_ctx)
3651 {
3652         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3653         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3654         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3655         uint32_t idx;
3656
3657         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3658                                            &idx);
3659         if (!cache_resource) {
3660                 rte_flow_error_set(ctx->error, ENOMEM,
3661                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3662                                    "cannot allocate resource memory");
3663                 return NULL;
3664         }
3665         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3666         cache_resource->idx = idx;
3667         return &cache_resource->entry;
3668 }
3669
3670 void
3671 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3672 {
3673         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3674         struct mlx5_flow_dv_encap_decap_resource *res =
3675                                        container_of(entry, typeof(*res), entry);
3676
3677         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3678 }
3679
3680 /**
3681  * Find existing encap/decap resource or create and register a new one.
3682  *
3683  * @param[in, out] dev
3684  *   Pointer to rte_eth_dev structure.
3685  * @param[in, out] resource
3686  *   Pointer to encap/decap resource.
3687  * @param[in, out] dev_flow
3688  *   Pointer to the dev_flow.
3689  * @param[out] error
3690  *   Pointer to error structure.
3691  *
3692  * @return
3693  *   0 on success, a negative errno value otherwise and rte_errno is set.
3694  */
3695 static int
3696 flow_dv_encap_decap_resource_register
3697                         (struct rte_eth_dev *dev,
3698                          struct mlx5_flow_dv_encap_decap_resource *resource,
3699                          struct mlx5_flow *dev_flow,
3700                          struct rte_flow_error *error)
3701 {
3702         struct mlx5_priv *priv = dev->data->dev_private;
3703         struct mlx5_dev_ctx_shared *sh = priv->sh;
3704         struct mlx5_list_entry *entry;
3705         union {
3706                 struct {
3707                         uint32_t ft_type:8;
3708                         uint32_t refmt_type:8;
3709                         /*
3710                          * Header reformat actions can be shared between
3711                          * non-root tables; one bit indicates whether the
3712                          * table is non-root.
3713                          */
3714                         uint32_t is_root:1;
3715                         uint32_t reserve:15;
3716                 };
3717                 uint32_t v32;
3718         } encap_decap_key = {
3719                 {
3720                         .ft_type = resource->ft_type,
3721                         .refmt_type = resource->reformat_type,
3722                         .is_root = !!dev_flow->dv.group,
3723                         .reserve = 0,
3724                 }
3725         };
3726         struct mlx5_flow_cb_ctx ctx = {
3727                 .error = error,
3728                 .data = resource,
3729         };
3730         struct mlx5_hlist *encaps_decaps;
3731         uint64_t key64;
3732
3733         encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
3734                                 "encaps_decaps",
3735                                 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
3736                                 true, true, sh,
3737                                 flow_dv_encap_decap_create_cb,
3738                                 flow_dv_encap_decap_match_cb,
3739                                 flow_dv_encap_decap_remove_cb,
3740                                 flow_dv_encap_decap_clone_cb,
3741                                 flow_dv_encap_decap_clone_free_cb,
3742                                 error);
3743         if (unlikely(!encaps_decaps))
3744                 return -rte_errno;
3745         resource->flags = dev_flow->dv.group ? 0 : 1;
3746         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3747                                  sizeof(encap_decap_key.v32), 0);
3748         if (resource->reformat_type !=
3749             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3750             resource->size)
3751                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3752         entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
3753         if (!entry)
3754                 return -rte_errno;
3755         resource = container_of(entry, typeof(*resource), entry);
3756         dev_flow->dv.encap_decap = resource;
3757         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3758         return 0;
3759 }
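/*
 * Editorial sketch (not part of the upstream file): the 64-bit hlist key
 * computed above packs ft_type, reformat_type and the non-root bit into a
 * single 32-bit word and folds it with __rte_raw_cksum(); for every
 * reformat type except plain L2 decap the raw header bytes are folded in
 * as well, so two resources share a bucket only when both metadata and
 * payload agree. Assuming the usual little-endian bit-field layout, an
 * equivalent open-coded computation looks like this:
 */
static __rte_unused uint64_t
example_encap_decap_key(const struct mlx5_flow_dv_encap_decap_resource *res,
                        uint32_t group)
{
        uint32_t v32 = (uint32_t)res->ft_type |
                       (uint32_t)res->reformat_type << 8 |
                       (uint32_t)!!group << 16;
        uint64_t key64 = __rte_raw_cksum(&v32, sizeof(v32), 0);

        /* L2 decap carries no header buffer; all other types hash it too. */
        if (res->reformat_type !=
            MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
            res->size)
                key64 = __rte_raw_cksum(res->buf, res->size, key64);
        return key64;
}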
3760
3761 /**
3762  * Find existing table jump resource or create and register a new one.
3763  *
3764  * @param[in, out] dev
3765  *   Pointer to rte_eth_dev structure.
3766  * @param[in, out] tbl
3767  *   Pointer to flow table resource.
3768  * @param[in, out] dev_flow
3769  *   Pointer to the dev_flow.
3770  * @param[out] error
3771  *   Pointer to error structure.
3772  *
3773  * @return
3774  *   0 on success, a negative errno value otherwise and rte_errno is set.
3775  */
3776 static int
3777 flow_dv_jump_tbl_resource_register
3778                         (struct rte_eth_dev *dev __rte_unused,
3779                          struct mlx5_flow_tbl_resource *tbl,
3780                          struct mlx5_flow *dev_flow,
3781                          struct rte_flow_error *error __rte_unused)
3782 {
3783         struct mlx5_flow_tbl_data_entry *tbl_data =
3784                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3785
3786         MLX5_ASSERT(tbl);
3787         MLX5_ASSERT(tbl_data->jump.action);
3788         dev_flow->handle->rix_jump = tbl_data->idx;
3789         dev_flow->dv.jump = &tbl_data->jump;
3790         return 0;
3791 }
3792
3793 int
3794 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3795                          struct mlx5_list_entry *entry, void *cb_ctx)
3796 {
3797         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3798         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3799         struct mlx5_flow_dv_port_id_action_resource *res =
3800                                        container_of(entry, typeof(*res), entry);
3801
3802         return ref->port_id != res->port_id;
3803 }
3804
3805 struct mlx5_list_entry *
3806 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3807 {
3808         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3809         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3810         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3811         struct mlx5_flow_dv_port_id_action_resource *resource;
3812         uint32_t idx;
3813         int ret;
3814
3815         /* Register new port id action resource. */
3816         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3817         if (!resource) {
3818                 rte_flow_error_set(ctx->error, ENOMEM,
3819                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3820                                    "cannot allocate port_id action memory");
3821                 return NULL;
3822         }
3823         *resource = *ref;
3824         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3825                                                         ref->port_id,
3826                                                         &resource->action);
3827         if (ret) {
3828                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3829                 rte_flow_error_set(ctx->error, ENOMEM,
3830                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3831                                    "cannot create action");
3832                 return NULL;
3833         }
3834         resource->idx = idx;
3835         return &resource->entry;
3836 }
3837
3838 struct mlx5_list_entry *
3839 flow_dv_port_id_clone_cb(void *tool_ctx,
3840                          struct mlx5_list_entry *entry __rte_unused,
3841                          void *cb_ctx)
3842 {
3843         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3844         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3845         struct mlx5_flow_dv_port_id_action_resource *resource;
3846         uint32_t idx;
3847
3848         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3849         if (!resource) {
3850                 rte_flow_error_set(ctx->error, ENOMEM,
3851                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3852                                    "cannot allocate port_id action memory");
3853                 return NULL;
3854         }
3855         memcpy(resource, entry, sizeof(*resource));
3856         resource->idx = idx;
3857         return &resource->entry;
3858 }
3859
3860 void
3861 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3862 {
3863         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3864         struct mlx5_flow_dv_port_id_action_resource *resource =
3865                                   container_of(entry, typeof(*resource), entry);
3866
3867         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3868 }
3869
3870 /**
3871  * Find existing port ID action resource or create and register a new one.
3872  *
3873  * @param[in, out] dev
3874  *   Pointer to rte_eth_dev structure.
3875  * @param[in, out] ref
3876  *   Pointer to port ID action resource reference.
3877  * @param[in, out] dev_flow
3878  *   Pointer to the dev_flow.
3879  * @param[out] error
3880  *   Pointer to error structure.
3881  *
3882  * @return
3883  *   0 on success, a negative errno value otherwise and rte_errno is set.
3884  */
3885 static int
3886 flow_dv_port_id_action_resource_register
3887                         (struct rte_eth_dev *dev,
3888                          struct mlx5_flow_dv_port_id_action_resource *ref,
3889                          struct mlx5_flow *dev_flow,
3890                          struct rte_flow_error *error)
3891 {
3892         struct mlx5_priv *priv = dev->data->dev_private;
3893         struct mlx5_list_entry *entry;
3894         struct mlx5_flow_dv_port_id_action_resource *resource;
3895         struct mlx5_flow_cb_ctx ctx = {
3896                 .error = error,
3897                 .data = ref,
3898         };
3899
3900         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3901         if (!entry)
3902                 return -rte_errno;
3903         resource = container_of(entry, typeof(*resource), entry);
3904         dev_flow->dv.port_id_action = resource;
3905         dev_flow->handle->rix_port_id_action = resource->idx;
3906         return 0;
3907 }
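/*
 * Usage sketch (illustrative only; the port number and function name are
 * hypothetical): registering a PORT_ID fate action for E-Switch destination
 * port 5. mlx5_list_register() first runs flow_dv_port_id_match_cb()
 * against the cached entries and calls flow_dv_port_id_create_cb() only on
 * a miss, so flows towards the same port share one mlx5dv destination
 * action object.
 */
static __rte_unused int
example_register_port_id(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_flow_dv_port_id_action_resource ref = { .port_id = 5 };

        return flow_dv_port_id_action_resource_register(dev, &ref, dev_flow,
                                                        error);
}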
3908
3909 int
3910 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3911                            struct mlx5_list_entry *entry, void *cb_ctx)
3912 {
3913         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3914         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3915         struct mlx5_flow_dv_push_vlan_action_resource *res =
3916                                        container_of(entry, typeof(*res), entry);
3917
3918         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3919 }
3920
3921 struct mlx5_list_entry *
3922 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3923 {
3924         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3925         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3926         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3927         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3928         struct mlx5dv_dr_domain *domain;
3929         uint32_t idx;
3930         int ret;
3931
3932         /* Register new push VLAN action resource. */
3933         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3934         if (!resource) {
3935                 rte_flow_error_set(ctx->error, ENOMEM,
3936                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3937                                    "cannot allocate push_vlan action memory");
3938                 return NULL;
3939         }
3940         *resource = *ref;
3941         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3942                 domain = sh->fdb_domain;
3943         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3944                 domain = sh->rx_domain;
3945         else
3946                 domain = sh->tx_domain;
3947         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3948                                                         &resource->action);
3949         if (ret) {
3950                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3951                 rte_flow_error_set(ctx->error, ENOMEM,
3952                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3953                                    "cannot create push vlan action");
3954                 return NULL;
3955         }
3956         resource->idx = idx;
3957         return &resource->entry;
3958 }
3959
3960 struct mlx5_list_entry *
3961 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3962                            struct mlx5_list_entry *entry __rte_unused,
3963                            void *cb_ctx)
3964 {
3965         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3966         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3967         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3968         uint32_t idx;
3969
3970         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3971         if (!resource) {
3972                 rte_flow_error_set(ctx->error, ENOMEM,
3973                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3974                                    "cannot allocate push_vlan action memory");
3975                 return NULL;
3976         }
3977         memcpy(resource, entry, sizeof(*resource));
3978         resource->idx = idx;
3979         return &resource->entry;
3980 }
3981
3982 void
3983 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3984 {
3985         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3986         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3987                                   container_of(entry, typeof(*resource), entry);
3988
3989         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3990 }
3991
3992 /**
3993  * Find existing push VLAN resource or create and register a new one.
3994  *
3995  * @param[in, out] dev
3996  *   Pointer to rte_eth_dev structure.
3997  * @param[in, out] ref
3998  *   Pointer to push VLAN action resource reference.
3999  * @param[in, out] dev_flow
4000  *   Pointer to the dev_flow.
4001  * @param[out] error
4002  *   Pointer to error structure.
4003  *
4004  * @return
4005  *   0 on success, a negative errno value otherwise and rte_errno is set.
4006  */
4007 static int
4008 flow_dv_push_vlan_action_resource_register
4009                        (struct rte_eth_dev *dev,
4010                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
4011                         struct mlx5_flow *dev_flow,
4012                         struct rte_flow_error *error)
4013 {
4014         struct mlx5_priv *priv = dev->data->dev_private;
4015         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4016         struct mlx5_list_entry *entry;
4017         struct mlx5_flow_cb_ctx ctx = {
4018                 .error = error,
4019                 .data = ref,
4020         };
4021
4022         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4023         if (!entry)
4024                 return -rte_errno;
4025         resource = container_of(entry, typeof(*resource), entry);
4026
4027         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4028         dev_flow->dv.push_vlan_res = resource;
4029         return 0;
4030 }
4031
4032 /**
4033  * Get the header size of a specific rte_flow_item_type.
4034  *
4035  * @param[in] item_type
4036  *   Tested rte_flow_item_type.
4037  *
4038  * @return
4039  *   Size of the item type header in bytes, 0 if void or irrelevant.
4040  */
4041 size_t
4042 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4043 {
4044         size_t retval;
4045
4046         switch (item_type) {
4047         case RTE_FLOW_ITEM_TYPE_ETH:
4048                 retval = sizeof(struct rte_ether_hdr);
4049                 break;
4050         case RTE_FLOW_ITEM_TYPE_VLAN:
4051                 retval = sizeof(struct rte_vlan_hdr);
4052                 break;
4053         case RTE_FLOW_ITEM_TYPE_IPV4:
4054                 retval = sizeof(struct rte_ipv4_hdr);
4055                 break;
4056         case RTE_FLOW_ITEM_TYPE_IPV6:
4057                 retval = sizeof(struct rte_ipv6_hdr);
4058                 break;
4059         case RTE_FLOW_ITEM_TYPE_UDP:
4060                 retval = sizeof(struct rte_udp_hdr);
4061                 break;
4062         case RTE_FLOW_ITEM_TYPE_TCP:
4063                 retval = sizeof(struct rte_tcp_hdr);
4064                 break;
4065         case RTE_FLOW_ITEM_TYPE_VXLAN:
4066         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4067                 retval = sizeof(struct rte_vxlan_hdr);
4068                 break;
4069         case RTE_FLOW_ITEM_TYPE_GRE:
4070         case RTE_FLOW_ITEM_TYPE_NVGRE:
4071                 retval = sizeof(struct rte_gre_hdr);
4072                 break;
4073         case RTE_FLOW_ITEM_TYPE_MPLS:
4074                 retval = sizeof(struct rte_mpls_hdr);
4075                 break;
4076         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4077         default:
4078                 retval = 0;
4079                 break;
4080         }
4081         return retval;
4082 }
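/*
 * Example (editorial): the helper above gives the outer header budget of a
 * classic VXLAN encapsulation, ETH(14) + IPv4(20) + UDP(8) + VXLAN(8) =
 * 50 bytes, well within MLX5_ENCAP_MAX_LEN.
 */
static __rte_unused size_t
example_vxlan_encap_hdr_len(void)
{
        return flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_ETH) +
               flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_IPV4) +
               flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_UDP) +
               flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_VXLAN);
}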
4083
4084 #define MLX5_ENCAP_IPV4_VERSION         0x40
4085 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4086 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4087 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4088 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4089 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4090 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4091
4092 /**
4093  * Convert the encap action data from a rte_flow_item list to a raw buffer.
4094  *
4095  * @param[in] items
4096  *   Pointer to rte_flow_item objects list.
4097  * @param[out] buf
4098  *   Pointer to the output buffer.
4099  * @param[out] size
4100  *   Pointer to the output buffer size.
4101  * @param[out] error
4102  *   Pointer to the error structure.
4103  *
4104  * @return
4105  *   0 on success, a negative errno value otherwise and rte_errno is set.
4106  */
4107 int
4108 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4109                            size_t *size, struct rte_flow_error *error)
4110 {
4111         struct rte_ether_hdr *eth = NULL;
4112         struct rte_vlan_hdr *vlan = NULL;
4113         struct rte_ipv4_hdr *ipv4 = NULL;
4114         struct rte_ipv6_hdr *ipv6 = NULL;
4115         struct rte_udp_hdr *udp = NULL;
4116         struct rte_vxlan_hdr *vxlan = NULL;
4117         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4118         struct rte_gre_hdr *gre = NULL;
4119         size_t len;
4120         size_t temp_size = 0;
4121
4122         if (!items)
4123                 return rte_flow_error_set(error, EINVAL,
4124                                           RTE_FLOW_ERROR_TYPE_ACTION,
4125                                           NULL, "invalid empty data");
4126         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4127                 len = flow_dv_get_item_hdr_len(items->type);
4128                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4129                         return rte_flow_error_set(error, EINVAL,
4130                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4131                                                   (void *)items->type,
4132                                                   "items total size is too big"
4133                                                   " for encap action");
4134                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4135                 switch (items->type) {
4136                 case RTE_FLOW_ITEM_TYPE_ETH:
4137                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4138                         break;
4139                 case RTE_FLOW_ITEM_TYPE_VLAN:
4140                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4141                         if (!eth)
4142                                 return rte_flow_error_set(error, EINVAL,
4143                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4144                                                 (void *)items->type,
4145                                                 "eth header not found");
4146                         if (!eth->ether_type)
4147                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4148                         break;
4149                 case RTE_FLOW_ITEM_TYPE_IPV4:
4150                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4151                         if (!vlan && !eth)
4152                                 return rte_flow_error_set(error, EINVAL,
4153                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4154                                                 (void *)items->type,
4155                                                 "neither eth nor vlan"
4156                                                 " header found");
4157                         if (vlan && !vlan->eth_proto)
4158                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4159                         else if (eth && !eth->ether_type)
4160                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4161                         if (!ipv4->version_ihl)
4162                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4163                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4164                         if (!ipv4->time_to_live)
4165                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4166                         break;
4167                 case RTE_FLOW_ITEM_TYPE_IPV6:
4168                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4169                         if (!vlan && !eth)
4170                                 return rte_flow_error_set(error, EINVAL,
4171                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4172                                                 (void *)items->type,
4173                                                 "neither eth nor vlan"
4174                                                 " header found");
4175                         if (vlan && !vlan->eth_proto)
4176                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4177                         else if (eth && !eth->ether_type)
4178                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4179                         if (!ipv6->vtc_flow)
4180                                 ipv6->vtc_flow =
4181                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4182                         if (!ipv6->hop_limits)
4183                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4184                         break;
4185                 case RTE_FLOW_ITEM_TYPE_UDP:
4186                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4187                         if (!ipv4 && !ipv6)
4188                                 return rte_flow_error_set(error, EINVAL,
4189                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4190                                                 (void *)items->type,
4191                                                 "ip header not found");
4192                         if (ipv4 && !ipv4->next_proto_id)
4193                                 ipv4->next_proto_id = IPPROTO_UDP;
4194                         else if (ipv6 && !ipv6->proto)
4195                                 ipv6->proto = IPPROTO_UDP;
4196                         break;
4197                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4198                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4199                         if (!udp)
4200                                 return rte_flow_error_set(error, EINVAL,
4201                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4202                                                 (void *)items->type,
4203                                                 "udp header not found");
4204                         if (!udp->dst_port)
4205                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4206                         if (!vxlan->vx_flags)
4207                                 vxlan->vx_flags =
4208                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4209                         break;
4210                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4211                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4212                         if (!udp)
4213                                 return rte_flow_error_set(error, EINVAL,
4214                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4215                                                 (void *)items->type,
4216                                                 "udp header not found");
4217                         if (!vxlan_gpe->proto)
4218                                 return rte_flow_error_set(error, EINVAL,
4219                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4220                                                 (void *)items->type,
4221                                                 "next protocol not found");
4222                         if (!udp->dst_port)
4223                                 udp->dst_port =
4224                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4225                         if (!vxlan_gpe->vx_flags)
4226                                 vxlan_gpe->vx_flags =
4227                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4228                         break;
4229                 case RTE_FLOW_ITEM_TYPE_GRE:
4230                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4231                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4232                         if (!gre->proto)
4233                                 return rte_flow_error_set(error, EINVAL,
4234                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4235                                                 (void *)items->type,
4236                                                 "next protocol not found");
4237                         if (!ipv4 && !ipv6)
4238                                 return rte_flow_error_set(error, EINVAL,
4239                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4240                                                 (void *)items->type,
4241                                                 "ip header not found");
4242                         if (ipv4 && !ipv4->next_proto_id)
4243                                 ipv4->next_proto_id = IPPROTO_GRE;
4244                         else if (ipv6 && !ipv6->proto)
4245                                 ipv6->proto = IPPROTO_GRE;
4246                         break;
4247                 case RTE_FLOW_ITEM_TYPE_VOID:
4248                         break;
4249                 default:
4250                         return rte_flow_error_set(error, EINVAL,
4251                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4252                                                   (void *)items->type,
4253                                                   "unsupported item type");
4254                         break;
4255                 }
4256                 temp_size += len;
4257         }
4258         *size = temp_size;
4259         return 0;
4260 }
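/*
 * Usage sketch (illustrative only; the addresses and VNI are made up): a
 * minimal ETH/IPv4/UDP/VXLAN item chain flattened into a raw header with
 * flow_dv_convert_encap_data(). Fields left at zero (EtherType, IPv4
 * version/IHL/TTL, UDP destination port, VXLAN flags) are filled with the
 * MLX5_ENCAP_* defaults by the converter. The caller must provide a
 * MLX5_ENCAP_MAX_LEN byte buffer.
 */
static __rte_unused int
example_convert_vxlan_encap(uint8_t buf[MLX5_ENCAP_MAX_LEN], size_t *size,
                            struct rte_flow_error *error)
{
        static const struct rte_flow_item_eth eth; /* Zeroed, use defaults. */
        static const struct rte_flow_item_ipv4 ipv4 = {
                .hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
                .hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
        };
        static const struct rte_flow_item_udp udp; /* dst_port -> 4789. */
        static const struct rte_flow_item_vxlan vxlan = {
                .vni = { 0, 0, 42 },
        };
        const struct rte_flow_item items[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
                { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        return flow_dv_convert_encap_data(items, buf, size, error);
}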
4261
4262 static int
4263 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4264 {
4265         struct rte_ether_hdr *eth = NULL;
4266         struct rte_vlan_hdr *vlan = NULL;
4267         struct rte_ipv6_hdr *ipv6 = NULL;
4268         struct rte_udp_hdr *udp = NULL;
4269         char *next_hdr;
4270         uint16_t proto;
4271
4272         eth = (struct rte_ether_hdr *)data;
4273         next_hdr = (char *)(eth + 1);
4274         proto = RTE_BE16(eth->ether_type);
4275
4276         /* VLAN skipping */
4277         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4278                 vlan = (struct rte_vlan_hdr *)next_hdr;
4279                 proto = RTE_BE16(vlan->eth_proto);
4280                 next_hdr += sizeof(struct rte_vlan_hdr);
4281         }
4282
4283         /* HW calculates the IPv4 checksum, no need to proceed. */
4284         if (proto == RTE_ETHER_TYPE_IPV4)
4285                 return 0;
4286
4287         /* Non IPv4/IPv6 header, not supported. */
4288         if (proto != RTE_ETHER_TYPE_IPV6) {
4289                 return rte_flow_error_set(error, ENOTSUP,
4290                                           RTE_FLOW_ERROR_TYPE_ACTION,
4291                                           NULL, "Cannot offload non IPv4/IPv6");
4292         }
4293
4294         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4295
4296         /* Ignore non-UDP packets. */
4297         if (ipv6->proto != IPPROTO_UDP)
4298                 return 0;
4299
4300         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4301         udp->dgram_cksum = 0;
4302
4303         return 0;
4304 }
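/*
 * Editorial note: the UDP checksum is cleared above because the hardware
 * does not compute a UDP checksum for the outer header it pushes. A zero
 * UDP checksum over IPv6 is permitted for tunnel encapsulations (see RFC
 * 6935); for IPv4 outer headers the device recalculates the IP header
 * checksum itself, so nothing needs to be done there.
 */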
4305
4306 /**
4307  * Convert L2 encap action to DV specification.
4308  *
4309  * @param[in] dev
4310  *   Pointer to rte_eth_dev structure.
4311  * @param[in] action
4312  *   Pointer to action structure.
4313  * @param[in, out] dev_flow
4314  *   Pointer to the mlx5_flow.
4315  * @param[in] transfer
4316  *   Mark if the flow is E-Switch flow.
4317  * @param[out] error
4318  *   Pointer to the error structure.
4319  *
4320  * @return
4321  *   0 on success, a negative errno value otherwise and rte_errno is set.
4322  */
4323 static int
4324 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4325                                const struct rte_flow_action *action,
4326                                struct mlx5_flow *dev_flow,
4327                                uint8_t transfer,
4328                                struct rte_flow_error *error)
4329 {
4330         const struct rte_flow_item *encap_data;
4331         const struct rte_flow_action_raw_encap *raw_encap_data;
4332         struct mlx5_flow_dv_encap_decap_resource res = {
4333                 .reformat_type =
4334                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4335                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4336                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4337         };
4338
4339         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4340                 raw_encap_data =
4341                         (const struct rte_flow_action_raw_encap *)action->conf;
4342                 res.size = raw_encap_data->size;
4343                 memcpy(res.buf, raw_encap_data->data, res.size);
4344         } else {
4345                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4346                         encap_data =
4347                                 ((const struct rte_flow_action_vxlan_encap *)
4348                                                 action->conf)->definition;
4349                 else
4350                         encap_data =
4351                                 ((const struct rte_flow_action_nvgre_encap *)
4352                                                 action->conf)->definition;
4353                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4354                                                &res.size, error))
4355                         return -rte_errno;
4356         }
4357         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4358                 return -rte_errno;
4359         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4360                 return rte_flow_error_set(error, EINVAL,
4361                                           RTE_FLOW_ERROR_TYPE_ACTION,
4362                                           NULL, "can't create L2 encap action");
4363         return 0;
4364 }
4365
4366 /**
4367  * Convert L2 decap action to DV specification.
4368  *
4369  * @param[in] dev
4370  *   Pointer to rte_eth_dev structure.
4371  * @param[in, out] dev_flow
4372  *   Pointer to the mlx5_flow.
4373  * @param[in] transfer
4374  *   Mark if the flow is E-Switch flow.
4375  * @param[out] error
4376  *   Pointer to the error structure.
4377  *
4378  * @return
4379  *   0 on success, a negative errno value otherwise and rte_errno is set.
4380  */
4381 static int
4382 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4383                                struct mlx5_flow *dev_flow,
4384                                uint8_t transfer,
4385                                struct rte_flow_error *error)
4386 {
4387         struct mlx5_flow_dv_encap_decap_resource res = {
4388                 .size = 0,
4389                 .reformat_type =
4390                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4391                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4392                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4393         };
4394
4395         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4396                 return rte_flow_error_set(error, EINVAL,
4397                                           RTE_FLOW_ERROR_TYPE_ACTION,
4398                                           NULL, "can't create L2 decap action");
4399         return 0;
4400 }
4401
4402 /**
4403  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4404  *
4405  * @param[in] dev
4406  *   Pointer to rte_eth_dev structure.
4407  * @param[in] action
4408  *   Pointer to action structure.
4409  * @param[in, out] dev_flow
4410  *   Pointer to the mlx5_flow.
4411  * @param[in] attr
4412  *   Pointer to the flow attributes.
4413  * @param[out] error
4414  *   Pointer to the error structure.
4415  *
4416  * @return
4417  *   0 on success, a negative errno value otherwise and rte_errno is set.
4418  */
4419 static int
4420 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4421                                 const struct rte_flow_action *action,
4422                                 struct mlx5_flow *dev_flow,
4423                                 const struct rte_flow_attr *attr,
4424                                 struct rte_flow_error *error)
4425 {
4426         const struct rte_flow_action_raw_encap *encap_data;
4427         struct mlx5_flow_dv_encap_decap_resource res;
4428
4429         memset(&res, 0, sizeof(res));
4430         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4431         res.size = encap_data->size;
4432         memcpy(res.buf, encap_data->data, res.size);
4433         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4434                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4435                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4436         if (attr->transfer)
4437                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4438         else
4439                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4440                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4441         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4442                 return rte_flow_error_set(error, EINVAL,
4443                                           RTE_FLOW_ERROR_TYPE_ACTION,
4444                                           NULL, "can't create encap action");
4445         return 0;
4446 }
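/*
 * Editorial note: the reformat type above is inferred from the raw buffer
 * length. A buffer shorter than MLX5_ENCAPSULATION_DECISION_SIZE can only
 * hold the bare L2 header to restore after stripping an L3 tunnel
 * (L3_TUNNEL_TO_L2 decap); anything at least that long is assumed to be a
 * complete outer stack to push (L2_TO_L3_TUNNEL encap).
 */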
4447
4448 /**
4449  * Create action push VLAN.
4450  *
4451  * @param[in] dev
4452  *   Pointer to rte_eth_dev structure.
4453  * @param[in] attr
4454  *   Pointer to the flow attributes.
4455  * @param[in] vlan
4456  *   Pointer to the vlan to push to the Ethernet header.
4457  * @param[in, out] dev_flow
4458  *   Pointer to the mlx5_flow.
4459  * @param[out] error
4460  *   Pointer to the error structure.
4461  *
4462  * @return
4463  *   0 on success, a negative errno value otherwise and rte_errno is set.
4464  */
4465 static int
4466 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4467                                 const struct rte_flow_attr *attr,
4468                                 const struct rte_vlan_hdr *vlan,
4469                                 struct mlx5_flow *dev_flow,
4470                                 struct rte_flow_error *error)
4471 {
4472         struct mlx5_flow_dv_push_vlan_action_resource res;
4473
4474         memset(&res, 0, sizeof(res));
4475         res.vlan_tag =
4476                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4477                                  vlan->vlan_tci);
4478         if (attr->transfer)
4479                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4480         else
4481                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4482                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4483         return flow_dv_push_vlan_action_resource_register
4484                                             (dev, &res, dev_flow, error);
4485 }
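/*
 * Worked example (editorial): packing the 32-bit push VLAN tag as done
 * above, for a hypothetical 802.1Q header with PCP 3 and VLAN ID 100:
 * TCI = (3 << 13) | 100 = 0x6064, so the action receives
 * rte_cpu_to_be_32(0x81006064).
 */
static __rte_unused rte_be32_t
example_push_vlan_tag(void)
{
        uint16_t eth_proto = RTE_ETHER_TYPE_VLAN; /* 0x8100 */
        uint16_t vlan_tci = (3 << 13) | 100;      /* PCP 3, VID 100 */

        return rte_cpu_to_be_32((uint32_t)eth_proto << 16 | vlan_tci);
}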
4486
4487 /**
4488  * Validate the modify-header actions.
4489  *
4490  * @param[in] action_flags
4491  *   Holds the actions detected until now.
4492  * @param[in] action
4493  *   Pointer to the modify action.
4494  * @param[out] error
4495  *   Pointer to error structure.
4496  *
4497  * @return
4498  *   0 on success, a negative errno value otherwise and rte_errno is set.
4499  */
4500 static int
4501 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4502                                    const struct rte_flow_action *action,
4503                                    struct rte_flow_error *error)
4504 {
4505         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4506                 return rte_flow_error_set(error, EINVAL,
4507                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4508                                           NULL, "action configuration not set");
4509         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4510                 return rte_flow_error_set(error, EINVAL,
4511                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4512                                           "can't have encap action before"
4513                                           " modify action");
4514         return 0;
4515 }
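/*
 * Editorial note: the flow_dv_validate_action_modify_*() helpers below all
 * follow the same pattern: run the generic check above, then verify that
 * the pattern actually matched the header being rewritten. When a decap
 * action precedes the modification, the inner layer flags are checked
 * instead of the outer ones, since decap exposes the inner headers.
 */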
4516
4517 /**
4518  * Validate the modify-header MAC address actions.
4519  *
4520  * @param[in] action_flags
4521  *   Holds the actions detected until now.
4522  * @param[in] action
4523  *   Pointer to the modify action.
4524  * @param[in] item_flags
4525  *   Holds the items detected.
4526  * @param[out] error
4527  *   Pointer to error structure.
4528  *
4529  * @return
4530  *   0 on success, a negative errno value otherwise and rte_errno is set.
4531  */
4532 static int
4533 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4534                                    const struct rte_flow_action *action,
4535                                    const uint64_t item_flags,
4536                                    struct rte_flow_error *error)
4537 {
4538         int ret = 0;
4539
4540         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4541         if (!ret) {
4542                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4543                         return rte_flow_error_set(error, EINVAL,
4544                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4545                                                   NULL,
4546                                                   "no L2 item in pattern");
4547         }
4548         return ret;
4549 }
4550
4551 /**
4552  * Validate the modify-header IPv4 address actions.
4553  *
4554  * @param[in] action_flags
4555  *   Holds the actions detected until now.
4556  * @param[in] action
4557  *   Pointer to the modify action.
4558  * @param[in] item_flags
4559  *   Holds the items detected.
4560  * @param[out] error
4561  *   Pointer to error structure.
4562  *
4563  * @return
4564  *   0 on success, a negative errno value otherwise and rte_errno is set.
4565  */
4566 static int
4567 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4568                                     const struct rte_flow_action *action,
4569                                     const uint64_t item_flags,
4570                                     struct rte_flow_error *error)
4571 {
4572         int ret = 0;
4573         uint64_t layer;
4574
4575         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4576         if (!ret) {
4577                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4578                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4579                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4580                 if (!(item_flags & layer))
4581                         return rte_flow_error_set(error, EINVAL,
4582                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4583                                                   NULL,
4584                                                   "no ipv4 item in pattern");
4585         }
4586         return ret;
4587 }
4588
4589 /**
4590  * Validate the modify-header IPv6 address actions.
4591  *
4592  * @param[in] action_flags
4593  *   Holds the actions detected until now.
4594  * @param[in] action
4595  *   Pointer to the modify action.
4596  * @param[in] item_flags
4597  *   Holds the items detected.
4598  * @param[out] error
4599  *   Pointer to error structure.
4600  *
4601  * @return
4602  *   0 on success, a negative errno value otherwise and rte_errno is set.
4603  */
4604 static int
4605 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4606                                     const struct rte_flow_action *action,
4607                                     const uint64_t item_flags,
4608                                     struct rte_flow_error *error)
4609 {
4610         int ret = 0;
4611         uint64_t layer;
4612
4613         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4614         if (!ret) {
4615                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4616                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4617                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4618                 if (!(item_flags & layer))
4619                         return rte_flow_error_set(error, EINVAL,
4620                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4621                                                   NULL,
4622                                                   "no ipv6 item in pattern");
4623         }
4624         return ret;
4625 }
4626
4627 /**
4628  * Validate the modify-header TP actions.
4629  *
4630  * @param[in] action_flags
4631  *   Holds the actions detected until now.
4632  * @param[in] action
4633  *   Pointer to the modify action.
4634  * @param[in] item_flags
4635  *   Holds the items detected.
4636  * @param[out] error
4637  *   Pointer to error structure.
4638  *
4639  * @return
4640  *   0 on success, a negative errno value otherwise and rte_errno is set.
4641  */
4642 static int
4643 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4644                                   const struct rte_flow_action *action,
4645                                   const uint64_t item_flags,
4646                                   struct rte_flow_error *error)
4647 {
4648         int ret = 0;
4649         uint64_t layer;
4650
4651         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4652         if (!ret) {
4653                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4654                                  MLX5_FLOW_LAYER_INNER_L4 :
4655                                  MLX5_FLOW_LAYER_OUTER_L4;
4656                 if (!(item_flags & layer))
4657                         return rte_flow_error_set(error, EINVAL,
4658                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4659                                                   NULL, "no transport layer "
4660                                                   "in pattern");
4661         }
4662         return ret;
4663 }
4664
4665 /**
4666  * Validate the modify-header actions of increment/decrement
4667  * TCP Sequence-number.
4668  *
4669  * @param[in] action_flags
4670  *   Holds the actions detected until now.
4671  * @param[in] action
4672  *   Pointer to the modify action.
4673  * @param[in] item_flags
4674  *   Holds the items detected.
4675  * @param[out] error
4676  *   Pointer to error structure.
4677  *
4678  * @return
4679  *   0 on success, a negative errno value otherwise and rte_errno is set.
4680  */
4681 static int
4682 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4683                                        const struct rte_flow_action *action,
4684                                        const uint64_t item_flags,
4685                                        struct rte_flow_error *error)
4686 {
4687         int ret = 0;
4688         uint64_t layer;
4689
4690         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4691         if (!ret) {
4692                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4693                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4694                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4695                 if (!(item_flags & layer))
4696                         return rte_flow_error_set(error, EINVAL,
4697                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4698                                                   NULL, "no TCP item in"
4699                                                   " pattern");
4700                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4701                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4702                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4703                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4704                         return rte_flow_error_set(error, EINVAL,
4705                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4706                                                   NULL,
4707                                                   "cannot decrease and increase"
4708                                                   " TCP sequence number"
4709                                                   " at the same time");
4710         }
4711         return ret;
4712 }
4713
4714 /**
4715  * Validate the modify-header actions of increment/decrement
4716  * TCP Acknowledgment number.
4717  *
4718  * @param[in] action_flags
4719  *   Holds the actions detected until now.
4720  * @param[in] action
4721  *   Pointer to the modify action.
4722  * @param[in] item_flags
4723  *   Holds the items detected.
4724  * @param[out] error
4725  *   Pointer to error structure.
4726  *
4727  * @return
4728  *   0 on success, a negative errno value otherwise and rte_errno is set.
4729  */
4730 static int
4731 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4732                                        const struct rte_flow_action *action,
4733                                        const uint64_t item_flags,
4734                                        struct rte_flow_error *error)
4735 {
4736         int ret = 0;
4737         uint64_t layer;
4738
4739         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4740         if (!ret) {
4741                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4742                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4743                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4744                 if (!(item_flags & layer))
4745                         return rte_flow_error_set(error, EINVAL,
4746                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4747                                                   NULL, "no TCP item in"
4748                                                   " pattern");
4749                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4750                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4751                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4752                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4753                         return rte_flow_error_set(error, EINVAL,
4754                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4755                                                   NULL,
4756                                                   "cannot decrease and increase"
4757                                                   " TCP acknowledgment number"
4758                                                   " at the same time");
4759         }
4760         return ret;
4761 }
4762
4763 /**
4764  * Validate the modify-header TTL actions.
4765  *
4766  * @param[in] action_flags
4767  *   Holds the actions detected until now.
4768  * @param[in] action
4769  *   Pointer to the modify action.
4770  * @param[in] item_flags
4771  *   Holds the items detected.
4772  * @param[out] error
4773  *   Pointer to error structure.
4774  *
4775  * @return
4776  *   0 on success, a negative errno value otherwise and rte_errno is set.
4777  */
4778 static int
4779 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4780                                    const struct rte_flow_action *action,
4781                                    const uint64_t item_flags,
4782                                    struct rte_flow_error *error)
4783 {
4784         int ret = 0;
4785         uint64_t layer;
4786
4787         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4788         if (!ret) {
4789                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4790                                  MLX5_FLOW_LAYER_INNER_L3 :
4791                                  MLX5_FLOW_LAYER_OUTER_L3;
4792                 if (!(item_flags & layer))
4793                         return rte_flow_error_set(error, EINVAL,
4794                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4795                                                   NULL,
4796                                                   "no IP protocol in pattern");
4797         }
4798         return ret;
4799 }
4800
4801 /**
4802  * Validate the generic modify field actions.
4803  * @param[in] dev
4804  *   Pointer to the rte_eth_dev structure.
4805  * @param[in] action_flags
4806  *   Holds the actions detected until now.
4807  * @param[in] action
4808  *   Pointer to the modify action.
4809  * @param[in] attr
4810  *   Pointer to the flow attributes.
4811  * @param[out] error
4812  *   Pointer to error structure.
4813  *
4814  * @return
4815  *   Number of header fields to modify (0 or more) on success,
4816  *   a negative errno value otherwise and rte_errno is set.
4817  */
4818 static int
4819 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4820                                    const uint64_t action_flags,
4821                                    const struct rte_flow_action *action,
4822                                    const struct rte_flow_attr *attr,
4823                                    struct rte_flow_error *error)
4824 {
4825         int ret = 0;
4826         struct mlx5_priv *priv = dev->data->dev_private;
4827         struct mlx5_sh_config *config = &priv->sh->config;
4828         const struct rte_flow_action_modify_field *action_modify_field =
4829                 action->conf;
4830         uint32_t dst_width = mlx5_flow_item_field_width(dev,
4831                                 action_modify_field->dst.field,
4832                                 -1, attr, error);
4833         uint32_t src_width = mlx5_flow_item_field_width(dev,
4834                                 action_modify_field->src.field,
4835                                 dst_width, attr, error);
4836
4837         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4838         if (ret)
4839                 return ret;
4840
4841         if (action_modify_field->width == 0)
4842                 return rte_flow_error_set(error, EINVAL,
4843                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4844                                 "no bits are requested to be modified");
4845         else if (action_modify_field->width > dst_width ||
4846                  action_modify_field->width > src_width)
4847                 return rte_flow_error_set(error, EINVAL,
4848                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4849                                 "cannot modify more bits than"
4850                                 " the width of a field");
4851         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4852             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4853                 if ((action_modify_field->dst.offset +
4854                      action_modify_field->width > dst_width) ||
4855                     (action_modify_field->dst.offset % 32))
4856                         return rte_flow_error_set(error, EINVAL,
4857                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4858                                         "destination offset is too big"
4859                                         " or not aligned to 4 bytes");
4860                 if (action_modify_field->dst.level &&
4861                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4862                         return rte_flow_error_set(error, ENOTSUP,
4863                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4864                                         "inner header fields modification"
4865                                         " is not supported");
4866         }
4867         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4868             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4869                 if (!attr->transfer && !attr->group)
4870                         return rte_flow_error_set(error, ENOTSUP,
4871                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4872                                         "modify field action is not"
4873                                         " supported for group 0");
4874                 if ((action_modify_field->src.offset +
4875                      action_modify_field->width > src_width) ||
4876                     (action_modify_field->src.offset % 32))
4877                         return rte_flow_error_set(error, EINVAL,
4878                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4879                                         "source offset is too big"
4880                                         " or not aligned to 4 bytes");
4881                 if (action_modify_field->src.level &&
4882                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4883                         return rte_flow_error_set(error, ENOTSUP,
4884                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4885                                         "modification of inner header"
4886                                         " fields is not supported");
4887         }
4888         if ((action_modify_field->dst.field ==
4889              action_modify_field->src.field) &&
4890             (action_modify_field->dst.level ==
4891              action_modify_field->src.level))
4892                 return rte_flow_error_set(error, EINVAL,
4893                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4894                                 "source and destination fields"
4895                                 " cannot be the same");
4896         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4897             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
4898             action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
4899                 return rte_flow_error_set(error, EINVAL,
4900                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4901                                 "mark, immediate value or a pointer to it"
4902                                 " cannot be used as a destination");
4903         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4904             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4905                 return rte_flow_error_set(error, ENOTSUP,
4906                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4907                                 "modification of an arbitrary"
4908                                 " place in a packet is not supported");
4909         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4910             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4911                 return rte_flow_error_set(error, ENOTSUP,
4912                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4913                                 "modification of the 802.1Q Tag"
4914                                 " Identifier is not supported");
4915         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4916             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4917                 return rte_flow_error_set(error, ENOTSUP,
4918                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4919                                 "modification of the VXLAN Network"
4920                                 " Identifier is not supported");
4921         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4922             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4923                 return rte_flow_error_set(error, ENOTSUP,
4924                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4925                                 "modification of the GENEVE Network"
4926                                 " Identifier is not supported");
4927         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4928             action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
4929                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4930                     !mlx5_flow_ext_mreg_supported(dev))
4931                         return rte_flow_error_set(error, ENOTSUP,
4932                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4933                                         "cannot modify mark in legacy mode"
4934                                         " or without extensive registers");
4935         if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4936             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4937                 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
4938                     !mlx5_flow_ext_mreg_supported(dev))
4939                         return rte_flow_error_set(error, ENOTSUP,
4940                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4941                                         "cannot modify meta without"
4942                                         " extensive registers support");
4943                 ret = flow_dv_get_metadata_reg(dev, attr, error);
4944                 if (ret < 0 || ret == REG_NON)
4945                         return rte_flow_error_set(error, ENOTSUP,
4946                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4947                                         "cannot modify meta without"
4948                                         " extensive registers available");
4949         }
4950         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4951                 return rte_flow_error_set(error, ENOTSUP,
4952                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4953                                 "add and sub operations"
4954                                 " are not supported");
4955         return (action_modify_field->width / 32) +
4956                !!(action_modify_field->width % 32);
4957 }
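
/*
 * Illustrative sketch (not part of the driver): a MODIFY_FIELD action
 * configuration that passes the checks above, assuming a rule in a
 * non-zero group; the field choices and the 32-bit width are
 * assumptions for the example.
 *
 *	struct rte_flow_action_modify_field conf = {
 *		.operation = RTE_FLOW_MODIFY_SET,
 *		.dst = {
 *			.field = RTE_FLOW_FIELD_TAG,
 *			.level = 0,
 *			.offset = 0,
 *		},
 *		.src = {
 *			.field = RTE_FLOW_FIELD_GTP_TEID,
 *			.offset = 0,
 *		},
 *		.width = 32,
 *	};
 *
 * The destination offset is 32-bit aligned, the width fits both fields,
 * and on success the function returns the number of 32-bit modification
 * words required (one here).
 */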
4958
4959 /**
4960  * Validate jump action.
4961  *
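 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tunnel
 *   Tunnel offload context of the rule, or NULL.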
4962  * @param[in] action
4963  *   Pointer to the jump action.
4964  * @param[in] action_flags
4965  *   Holds the actions detected until now.
4966  * @param[in] attributes
4967  *   Pointer to flow attributes.
4968  * @param[in] external
4969  *   Action belongs to a flow rule created by a request external to the PMD.
4970  * @param[out] error
4971  *   Pointer to error structure.
4972  *
4973  * @return
4974  *   0 on success, a negative errno value otherwise and rte_errno is set.
4975  */
4976 static int
4977 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4978                              const struct mlx5_flow_tunnel *tunnel,
4979                              const struct rte_flow_action *action,
4980                              uint64_t action_flags,
4981                              const struct rte_flow_attr *attributes,
4982                              bool external, struct rte_flow_error *error)
4983 {
4984         uint32_t target_group, table = 0;
4985         int ret = 0;
4986         struct flow_grp_info grp_info = {
4987                 .external = !!external,
4988                 .transfer = !!attributes->transfer,
4989                 .fdb_def_rule = 1,
4990                 .std_tbl_fix = 0
4991         };
4992         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4993                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4994                 return rte_flow_error_set(error, EINVAL,
4995                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4996                                           "can't have 2 fate actions in"
4997                                           " the same flow");
4998         if (!action->conf)
4999                 return rte_flow_error_set(error, EINVAL,
5000                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5001                                           NULL, "action configuration not set");
5002         target_group =
5003                 ((const struct rte_flow_action_jump *)action->conf)->group;
5004         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
5005                                        &grp_info, error);
5006         if (ret)
5007                 return ret;
5008         if (attributes->group == target_group &&
5009             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
5010                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
5011                 return rte_flow_error_set(error, EINVAL,
5012                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5013                                           "target group must be other than"
5014                                           " the current flow group");
5015         if (table == 0)
5016                 return rte_flow_error_set(error, EINVAL,
5017                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5018                                           NULL, "root table shouldn't be the destination");
5019         return 0;
5020 }
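
/*
 * Illustrative sketch (not part of the driver): a jump action as an
 * application would pass it to the validation above; the group number is
 * an assumption.
 *
 *	struct rte_flow_action_jump jump_conf = { .group = 2 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_JUMP,
 *		.conf = &jump_conf,
 *	};
 *
 * The target group must differ from the rule's own group (unless a
 * tunnel set/match action is present) and must not translate to the
 * root table.
 */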
5021
5022 /**
5023  * Validate action PORT_ID / REPRESENTED_PORT.
5024  *
5025  * @param[in] dev
5026  *   Pointer to rte_eth_dev structure.
5027  * @param[in] action_flags
5028  *   Bit-fields that hold the actions detected until now.
5029  * @param[in] action
5030  *   PORT_ID / REPRESENTED_PORT action structure.
5031  * @param[in] attr
5032  *   Attributes of flow that includes this action.
5033  * @param[out] error
5034  *   Pointer to error structure.
5035  *
5036  * @return
5037  *   0 on success, a negative errno value otherwise and rte_errno is set.
5038  */
5039 static int
5040 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5041                                 uint64_t action_flags,
5042                                 const struct rte_flow_action *action,
5043                                 const struct rte_flow_attr *attr,
5044                                 struct rte_flow_error *error)
5045 {
5046         const struct rte_flow_action_port_id *port_id;
5047         const struct rte_flow_action_ethdev *ethdev;
5048         struct mlx5_priv *act_priv;
5049         struct mlx5_priv *dev_priv;
5050         uint16_t port;
5051
5052         if (!attr->transfer)
5053                 return rte_flow_error_set(error, ENOTSUP,
5054                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5055                                           NULL,
5056                                           "port action is valid in transfer"
5057                                           " mode only");
5058         if (!action || !action->conf)
5059                 return rte_flow_error_set(error, ENOTSUP,
5060                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5061                                           NULL,
5062                                           "port action parameters must be"
5063                                           " specified");
5064         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5065                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5066                 return rte_flow_error_set(error, EINVAL,
5067                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5068                                           "can have only one fate action in"
5069                                           " a flow");
5070         dev_priv = mlx5_dev_to_eswitch_info(dev);
5071         if (!dev_priv)
5072                 return rte_flow_error_set(error, rte_errno,
5073                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5074                                           NULL,
5075                                           "failed to obtain E-Switch info");
5076         switch (action->type) {
5077         case RTE_FLOW_ACTION_TYPE_PORT_ID:
5078                 port_id = action->conf;
5079                 port = port_id->original ? dev->data->port_id : port_id->id;
5080                 break;
5081         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5082                 ethdev = action->conf;
5083                 port = ethdev->port_id;
5084                 break;
5085         default:
5086                 MLX5_ASSERT(false);
5087                 return rte_flow_error_set
5088                                 (error, EINVAL,
5089                                  RTE_FLOW_ERROR_TYPE_ACTION, action,
5090                                  "unknown E-Switch action");
5091         }
5092         act_priv = mlx5_port_to_eswitch_info(port, false);
5093         if (!act_priv)
5094                 return rte_flow_error_set
5095                                 (error, rte_errno,
5096                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
5097                                  "failed to obtain E-Switch port id for port");
5098         if (act_priv->domain_id != dev_priv->domain_id)
5099                 return rte_flow_error_set
5100                                 (error, EINVAL,
5101                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5102                                  "port does not belong to"
5103                                  " the E-Switch being configured");
5104         return 0;
5105 }
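
/*
 * Illustrative sketch (not part of the driver): the two equivalent
 * configurations this validation accepts, both only in transfer mode;
 * the port number is an assumption.
 *
 *	struct rte_flow_action_port_id pid_conf = {
 *		.original = 0,
 *		.id = 1,
 *	};
 *	struct rte_flow_action_ethdev ethdev_conf = { .port_id = 1 };
 *
 * The referenced port must belong to the same E-Switch domain as the
 * device the rule is created on.
 */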
5106
5107 /**
5108  * Get the maximum number of modify header actions.
5109  *
5110  * @param dev
5111  *   Pointer to rte_eth_dev structure.
5112  * @param root
5113  *   Whether action is on root table.
5114  *
5115  * @return
5116  *   Max number of modify header actions the device can support.
5117  */
5118 static inline unsigned int
5119 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5120                               bool root)
5121 {
5122         /*
5123          * There's no way to directly query the max capacity from FW.
5124          * The maximal value on root table should be assumed to be supported.
5125          */
5126         if (!root)
5127                 return MLX5_MAX_MODIFY_NUM;
5128         else
5129                 return MLX5_ROOT_TBL_MODIFY_NUM;
5130 }
5131
5132 /**
5133  * Validate the meter action.
5134  *
5135  * @param[in] dev
5136  *   Pointer to rte_eth_dev structure.
5137  * @param[in] action_flags
5138  *   Bit-fields that hold the actions detected until now.
5139  * @param[in] item_flags
5140  *   Holds the items detected.
5141  * @param[in] action
5142  *   Pointer to the meter action.
5143  * @param[in] attr
5144  *   Attributes of flow that includes this action.
5145  * @param[in] port_id_item
5146  *   Pointer to item indicating port id.
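 * @param[out] def_policy
 *   Set to true if the meter uses the default policy, false otherwise.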
5147  * @param[out] error
5148  *   Pointer to error structure.
5149  *
5150  * @return
5151  *   0 on success, a negative errno value otherwise and rte_errno is set.
5152  */
5153 static int
5154 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5155                                 uint64_t action_flags, uint64_t item_flags,
5156                                 const struct rte_flow_action *action,
5157                                 const struct rte_flow_attr *attr,
5158                                 const struct rte_flow_item *port_id_item,
5159                                 bool *def_policy,
5160                                 struct rte_flow_error *error)
5161 {
5162         struct mlx5_priv *priv = dev->data->dev_private;
5163         const struct rte_flow_action_meter *am = action->conf;
5164         struct mlx5_flow_meter_info *fm;
5165         struct mlx5_flow_meter_policy *mtr_policy;
5166         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5167
5168         if (!am)
5169                 return rte_flow_error_set(error, EINVAL,
5170                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5171                                           "meter action conf is NULL");
5172
5173         if (action_flags & MLX5_FLOW_ACTION_METER)
5174                 return rte_flow_error_set(error, ENOTSUP,
5175                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5176                                           "meter chaining not supported");
5177         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5178                 return rte_flow_error_set(error, ENOTSUP,
5179                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5180                                           "meter with jump not supported");
5181         if (!priv->mtr_en)
5182                 return rte_flow_error_set(error, ENOTSUP,
5183                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5184                                           NULL,
5185                                           "meter action not supported");
5186         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5187         if (!fm)
5188                 return rte_flow_error_set(error, EINVAL,
5189                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5190                                           "Meter not found");
5191         /* ASO meter can always be shared by different domains. */
5192         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5193             !(fm->transfer == attr->transfer ||
5194               (!fm->ingress && !attr->ingress && attr->egress) ||
5195               (!fm->egress && !attr->egress && attr->ingress)))
5196                 return rte_flow_error_set(error, EINVAL,
5197                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5198                         "Flow attributes domain is either invalid "
5199                         "or has a conflict with the current "
5200                         "meter attributes");
5201         if (fm->def_policy) {
5202                 if (!((attr->transfer &&
5203                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5204                         (attr->egress &&
5205                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5206                         (attr->ingress &&
5207                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5208                         return rte_flow_error_set(error, EINVAL,
5209                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5210                                           "Flow attributes domain "
5211                                           "has a conflict with the current "
5212                                           "meter domain attributes");
5213                 *def_policy = true;
5214         } else {
5215                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5216                                                 fm->policy_id, NULL);
5217                 if (!mtr_policy)
5218                         return rte_flow_error_set(error, EINVAL,
5219                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5220                                           "Invalid policy id for meter");
5221                 if (!((attr->transfer && mtr_policy->transfer) ||
5222                         (attr->egress && mtr_policy->egress) ||
5223                         (attr->ingress && mtr_policy->ingress)))
5224                         return rte_flow_error_set(error, EINVAL,
5225                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5226                                           "Flow attributes domain "
5227                                           "has a conflict with the current "
5228                                           "meter domain attributes");
5229                 if (attr->transfer && mtr_policy->dev) {
5230                         /*
5231                          * When the policy has a port_id fate action,
5232                          * the flow should have the same src port as the policy.
5233                          */
5234                         struct mlx5_priv *policy_port_priv =
5235                                         mtr_policy->dev->data->dev_private;
5236                         int32_t flow_src_port = priv->representor_id;
5237
5238                         if (port_id_item) {
5239                                 const struct rte_flow_item_port_id *spec =
5240                                                         port_id_item->spec;
5241                                 struct mlx5_priv *port_priv =
5242                                         mlx5_port_to_eswitch_info(spec->id,
5243                                                                   false);
5244                                 if (!port_priv)
5245                                         return rte_flow_error_set(error,
5246                                                 rte_errno,
5247                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5248                                                 spec,
5249                                                 "Failed to get port info.");
5250                                 flow_src_port = port_priv->representor_id;
5251                         }
5252                         if (flow_src_port != policy_port_priv->representor_id)
5253                                 return rte_flow_error_set(error,
5254                                                 rte_errno,
5255                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5256                                                 NULL,
5257                                                 "Flow and meter policy "
5258                                                 "have different src ports.");
5259                 } else if (mtr_policy->is_rss) {
5260                         struct mlx5_flow_meter_policy *fp;
5261                         struct mlx5_meter_policy_action_container *acg;
5262                         struct mlx5_meter_policy_action_container *acy;
5263                         const struct rte_flow_action *rss_act;
5264                         int ret;
5265
5266                         fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
5267                                                                 mtr_policy);
5268                         if (fp == NULL)
5269                                 return rte_flow_error_set(error, EINVAL,
5270                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5271                                                   "Unable to get the final "
5272                                                   "policy in the hierarchy");
5273                         acg = &fp->act_cnt[RTE_COLOR_GREEN];
5274                         acy = &fp->act_cnt[RTE_COLOR_YELLOW];
5275                         MLX5_ASSERT(acg->fate_action ==
5276                                     MLX5_FLOW_FATE_SHARED_RSS ||
5277                                     acy->fate_action ==
5278                                     MLX5_FLOW_FATE_SHARED_RSS);
5279                         if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
5280                                 rss_act = acg->rss;
5281                         else
5282                                 rss_act = acy->rss;
5283                         ret = mlx5_flow_validate_action_rss(rss_act,
5284                                         action_flags, dev, attr,
5285                                         item_flags, error);
5286                         if (ret)
5287                                 return ret;
5288                 }
5289                 *def_policy = false;
5290         }
5291         return 0;
5292 }
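
/*
 * Illustrative sketch (not part of the driver): a meter action
 * referencing an already created meter; the meter id is an assumption.
 *
 *	struct rte_flow_action_meter meter_conf = { .mtr_id = 1 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_METER,
 *		.conf = &meter_conf,
 *	};
 *
 * The meter must exist, may not follow another meter or a jump action,
 * and its (or its policy's) domain must not conflict with the flow
 * attributes, as checked above.
 */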
5293
5294 /**
5295  * Validate the age action.
5296  *
5297  * @param[in] action_flags
5298  *   Holds the actions detected until now.
5299  * @param[in] action
5300  *   Pointer to the age action.
5301  * @param[in] dev
5302  *   Pointer to the Ethernet device structure.
5303  * @param[out] error
5304  *   Pointer to error structure.
5305  *
5306  * @return
5307  *   0 on success, a negative errno value otherwise and rte_errno is set.
5308  */
5309 static int
5310 flow_dv_validate_action_age(uint64_t action_flags,
5311                             const struct rte_flow_action *action,
5312                             struct rte_eth_dev *dev,
5313                             struct rte_flow_error *error)
5314 {
5315         struct mlx5_priv *priv = dev->data->dev_private;
5316         const struct rte_flow_action_age *age = action->conf;
5317
5318         if (!priv->sh->cdev->config.devx ||
5319             (priv->sh->cmng.counter_fallback && !priv->sh->aso_age_mng))
5320                 return rte_flow_error_set(error, ENOTSUP,
5321                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5322                                           NULL,
5323                                           "age action not supported");
5324         if (!(action->conf))
5325                 return rte_flow_error_set(error, EINVAL,
5326                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5327                                           "configuration cannot be NULL");
5328         if (!(age->timeout))
5329                 return rte_flow_error_set(error, EINVAL,
5330                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5331                                           "invalid timeout value 0");
5332         if (action_flags & MLX5_FLOW_ACTION_AGE)
5333                 return rte_flow_error_set(error, EINVAL,
5334                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5335                                           "duplicate age actions set");
5336         return 0;
5337 }
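
/*
 * Illustrative sketch (not part of the driver): an age action with a
 * 10-second timeout; the timeout value is an assumption.
 *
 *	struct rte_flow_action_age age_conf = {
 *		.timeout = 10,
 *		.context = NULL,
 *	};
 *
 * A zero timeout and duplicate age actions in the same flow are both
 * rejected by the validation above.
 */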
5338
5339 /**
5340  * Validate the modify-header IPv4 DSCP actions.
5341  *
5342  * @param[in] action_flags
5343  *   Holds the actions detected until now.
5344  * @param[in] action
5345  *   Pointer to the modify action.
5346  * @param[in] item_flags
5347  *   Holds the items detected.
5348  * @param[out] error
5349  *   Pointer to error structure.
5350  *
5351  * @return
5352  *   0 on success, a negative errno value otherwise and rte_errno is set.
5353  */
5354 static int
5355 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5356                                          const struct rte_flow_action *action,
5357                                          const uint64_t item_flags,
5358                                          struct rte_flow_error *error)
5359 {
5360         int ret = 0;
5361
5362         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5363         if (!ret) {
5364                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5365                         return rte_flow_error_set(error, EINVAL,
5366                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5367                                                   NULL,
5368                                                   "no IPv4 item in pattern");
5369         }
5370         return ret;
5371 }
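
/*
 * Illustrative sketch (not part of the driver): the IPv4 DSCP rewrite
 * validated above; the DSCP value is an assumption. The pattern must
 * contain an IPv4 item; IPv6 is analogous via
 * RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP (see below).
 *
 *	struct rte_flow_action_set_dscp dscp_conf = { .dscp = 46 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP,
 *		.conf = &dscp_conf,
 *	};
 */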
5372
5373 /**
5374  * Validate the modify-header IPv6 DSCP actions.
5375  *
5376  * @param[in] action_flags
5377  *   Holds the actions detected until now.
5378  * @param[in] action
5379  *   Pointer to the modify action.
5380  * @param[in] item_flags
5381  *   Holds the items detected.
5382  * @param[out] error
5383  *   Pointer to error structure.
5384  *
5385  * @return
5386  *   0 on success, a negative errno value otherwise and rte_errno is set.
5387  */
5388 static int
5389 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5390                                          const struct rte_flow_action *action,
5391                                          const uint64_t item_flags,
5392                                          struct rte_flow_error *error)
5393 {
5394         int ret = 0;
5395
5396         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5397         if (!ret) {
5398                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5399                         return rte_flow_error_set(error, EINVAL,
5400                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5401                                                   NULL,
5402                                                   "no IPv6 item in pattern");
5403         }
5404         return ret;
5405 }
5406
5407 int
5408 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5409                         struct mlx5_list_entry *entry, void *cb_ctx)
5410 {
5411         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5412         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5413         struct mlx5_flow_dv_modify_hdr_resource *resource =
5414                                   container_of(entry, typeof(*resource), entry);
5415         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5416
5417         key_len += ref->actions_num * sizeof(ref->actions[0]);
5418         return ref->actions_num != resource->actions_num ||
5419                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5420 }
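
/*
 * Note: modify-header entries are compared with a single memcmp() over a
 * contiguous key that starts at the ft_type member of
 * mlx5_flow_dv_modify_hdr_resource and runs through all modification
 * commands, so the table type, the fixed fields and the actions are
 * matched at once.
 */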
5421
5422 static struct mlx5_indexed_pool *
5423 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5424 {
5425         struct mlx5_indexed_pool *ipool = __atomic_load_n
5426                                      (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5427
5428         if (!ipool) {
5429                 struct mlx5_indexed_pool *expected = NULL;
5430                 struct mlx5_indexed_pool_config cfg =
5431                     (struct mlx5_indexed_pool_config) {
5432                        .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5433                                                                    (index + 1) *
5434                                            sizeof(struct mlx5_modification_cmd),
5435                        .trunk_size = 64,
5436                        .grow_trunk = 3,
5437                        .grow_shift = 2,
5438                        .need_lock = 1,
5439                        .release_mem_en = !!sh->config.reclaim_mode,
5440                        .per_core_cache =
5441                                        sh->config.reclaim_mode ? 0 : (1 << 16),
5442                        .malloc = mlx5_malloc,
5443                        .free = mlx5_free,
5444                        .type = "mlx5_modify_action_resource",
5445                 };
5446
5447                 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5448                 ipool = mlx5_ipool_create(&cfg);
5449                 if (!ipool)
5450                         return NULL;
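                /*
                 * Publish the new ipool; if the CAS fails, another thread
                 * installed one first, so destroy ours and use the winner's.
                 */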
5451                 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5452                                                  &expected, ipool, false,
5453                                                  __ATOMIC_SEQ_CST,
5454                                                  __ATOMIC_SEQ_CST)) {
5455                         mlx5_ipool_destroy(ipool);
5456                         ipool = __atomic_load_n(&sh->mdh_ipools[index],
5457                                                 __ATOMIC_SEQ_CST);
5458                 }
5459         }
5460         return ipool;
5461 }
5462
5463 struct mlx5_list_entry *
5464 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5465 {
5466         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5467         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5468         struct mlx5dv_dr_domain *ns;
5469         struct mlx5_flow_dv_modify_hdr_resource *entry;
5470         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5471         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5472                                                           ref->actions_num - 1);
5473         int ret;
5474         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5475         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5476         uint32_t idx;
5477
5478         if (unlikely(!ipool)) {
5479                 rte_flow_error_set(ctx->error, ENOMEM,
5480                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5481                                    NULL, "cannot allocate modify ipool");
5482                 return NULL;
5483         }
5484         entry = mlx5_ipool_zmalloc(ipool, &idx);
5485         if (!entry) {
5486                 rte_flow_error_set(ctx->error, ENOMEM,
5487                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5488                                    "cannot allocate resource memory");
5489                 return NULL;
5490         }
5491         rte_memcpy(&entry->ft_type,
5492                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5493                    key_len + data_len);
5494         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5495                 ns = sh->fdb_domain;
5496         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5497                 ns = sh->tx_domain;
5498         else
5499                 ns = sh->rx_domain;
5500         ret = mlx5_flow_os_create_flow_action_modify_header
5501                                         (sh->cdev->ctx, ns, entry,
5502                                          data_len, &entry->action);
5503         if (ret) {
5504                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5505                 rte_flow_error_set(ctx->error, ENOMEM,
5506                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5507                                    NULL, "cannot create modification action");
5508                 return NULL;
5509         }
5510         entry->idx = idx;
5511         return &entry->entry;
5512 }
5513
5514 struct mlx5_list_entry *
5515 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5516                         void *cb_ctx)
5517 {
5518         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5519         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5520         struct mlx5_flow_dv_modify_hdr_resource *entry;
5521         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5522         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5523         uint32_t idx;
5524
5525         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5526                                   &idx);
5527         if (!entry) {
5528                 rte_flow_error_set(ctx->error, ENOMEM,
5529                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5530                                    "cannot allocate resource memory");
5531                 return NULL;
5532         }
5533         memcpy(entry, oentry, sizeof(*entry) + data_len);
5534         entry->idx = idx;
5535         return &entry->entry;
5536 }
5537
5538 void
5539 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5540 {
5541         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5542         struct mlx5_flow_dv_modify_hdr_resource *res =
5543                 container_of(entry, typeof(*res), entry);
5544
5545         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5546 }
5547
5548 /**
5549  * Validate the sample action.
5550  *
5551  * @param[in, out] action_flags
5552  *   Holds the actions detected until now.
5553  * @param[in] action
5554  *   Pointer to the sample action.
5555  * @param[in] dev
5556  *   Pointer to the Ethernet device structure.
5557  * @param[in] attr
5558  *   Attributes of flow that includes this action.
5559  * @param[in] item_flags
5560  *   Holds the items detected.
5561  * @param[in] rss
5562  *   Pointer to the RSS action.
5563  * @param[out] sample_rss
5564  *   Pointer to the RSS action in sample action list.
5565  * @param[out] count
5566  *   Pointer to the COUNT action in sample action list.
5567  * @param[out] fdb_mirror_limit
5568  *   Pointer to the FDB mirror limitation flag.
5569  * @param[out] error
5570  *   Pointer to error structure.
5571  *
5572  * @return
5573  *   0 on success, a negative errno value otherwise and rte_errno is set.
5574  */
5575 static int
5576 flow_dv_validate_action_sample(uint64_t *action_flags,
5577                                const struct rte_flow_action *action,
5578                                struct rte_eth_dev *dev,
5579                                const struct rte_flow_attr *attr,
5580                                uint64_t item_flags,
5581                                const struct rte_flow_action_rss *rss,
5582                                const struct rte_flow_action_rss **sample_rss,
5583                                const struct rte_flow_action_count **count,
5584                                int *fdb_mirror_limit,
5585                                struct rte_flow_error *error)
5586 {
5587         struct mlx5_priv *priv = dev->data->dev_private;
5588         struct mlx5_sh_config *dev_conf = &priv->sh->config;
5589         const struct rte_flow_action_sample *sample = action->conf;
5590         const struct rte_flow_action *act;
5591         uint64_t sub_action_flags = 0;
5592         uint16_t queue_index = 0xFFFF;
5593         int actions_n = 0;
5594         int ret;
5595
5596         if (!sample)
5597                 return rte_flow_error_set(error, EINVAL,
5598                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5599                                           "configuration cannot be NULL");
5600         if (sample->ratio == 0)
5601                 return rte_flow_error_set(error, EINVAL,
5602                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5603                                           "ratio value starts from 1");
5604         if (!priv->sh->cdev->config.devx ||
5605             (sample->ratio > 0 && !priv->sampler_en))
5606                 return rte_flow_error_set(error, ENOTSUP,
5607                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5608                                           NULL,
5609                                           "sample action not supported");
5610         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5611                 return rte_flow_error_set(error, EINVAL,
5612                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5613                                           "Multiple sample actions not "
5614                                           "supported");
5615         if (*action_flags & MLX5_FLOW_ACTION_METER)
5616                 return rte_flow_error_set(error, EINVAL,
5617                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5618                                           "wrong action order, meter should "
5619                                           "be after sample action");
5620         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5621                 return rte_flow_error_set(error, EINVAL,
5622                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5623                                           "wrong action order, jump should "
5624                                           "be after sample action");
5625         if (*action_flags & MLX5_FLOW_ACTION_CT)
5626                 return rte_flow_error_set(error, EINVAL,
5627                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5628                                           "Sample after CT not supported");
5629         act = sample->actions;
5630         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5631                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5632                         return rte_flow_error_set(error, ENOTSUP,
5633                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5634                                                   act, "too many actions");
5635                 switch (act->type) {
5636                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5637                         ret = mlx5_flow_validate_action_queue(act,
5638                                                               sub_action_flags,
5639                                                               dev,
5640                                                               attr, error);
5641                         if (ret < 0)
5642                                 return ret;
5643                         queue_index = ((const struct rte_flow_action_queue *)
5644                                                         (act->conf))->index;
5645                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5646                         ++actions_n;
5647                         break;
5648                 case RTE_FLOW_ACTION_TYPE_RSS:
5649                         *sample_rss = act->conf;
5650                         ret = mlx5_flow_validate_action_rss(act,
5651                                                             sub_action_flags,
5652                                                             dev, attr,
5653                                                             item_flags,
5654                                                             error);
5655                         if (ret < 0)
5656                                 return ret;
5657                         if (rss && *sample_rss &&
5658                             ((*sample_rss)->level != rss->level ||
5659                             (*sample_rss)->types != rss->types))
5660                                 return rte_flow_error_set(error, ENOTSUP,
5661                                         RTE_FLOW_ERROR_TYPE_ACTION,
5662                                         NULL,
5663                                         "Can't use different RSS types "
5664                                         "or levels in the same flow");
5665                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5666                                 queue_index = (*sample_rss)->queue[0];
5667                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5668                         ++actions_n;
5669                         break;
5670                 case RTE_FLOW_ACTION_TYPE_MARK:
5671                         ret = flow_dv_validate_action_mark(dev, act,
5672                                                            sub_action_flags,
5673                                                            attr, error);
5674                         if (ret < 0)
5675                                 return ret;
5676                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5677                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5678                                                 MLX5_FLOW_ACTION_MARK_EXT;
5679                         else
5680                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5681                         ++actions_n;
5682                         break;
5683                 case RTE_FLOW_ACTION_TYPE_COUNT:
5684                         ret = flow_dv_validate_action_count
5685                                 (dev, false, *action_flags | sub_action_flags,
5686                                  attr, error);
5687                         if (ret < 0)
5688                                 return ret;
5689                         *count = act->conf;
5690                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5691                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5692                         ++actions_n;
5693                         break;
5694                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5695                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5696                         ret = flow_dv_validate_action_port_id(dev,
5697                                                               sub_action_flags,
5698                                                               act,
5699                                                               attr,
5700                                                               error);
5701                         if (ret)
5702                                 return ret;
5703                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5704                         ++actions_n;
5705                         break;
5706                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5707                         ret = flow_dv_validate_action_raw_encap_decap
5708                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5709                                  &actions_n, action, item_flags, error);
5710                         if (ret < 0)
5711                                 return ret;
5712                         ++actions_n;
5713                         break;
5714                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5715                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5716                         ret = flow_dv_validate_action_l2_encap(dev,
5717                                                                sub_action_flags,
5718                                                                act, attr,
5719                                                                error);
5720                         if (ret < 0)
5721                                 return ret;
5722                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5723                         ++actions_n;
5724                         break;
5725                 default:
5726                         return rte_flow_error_set(error, ENOTSUP,
5727                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5728                                                   NULL,
5729                                                   "unsupported action in the "
5730                                                   "sample actions list");
5731                 }
5732         }
5733         if (attr->ingress && !attr->transfer) {
5734                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5735                                           MLX5_FLOW_ACTION_RSS)))
5736                         return rte_flow_error_set(error, EINVAL,
5737                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5738                                                   NULL,
5739                                                   "Ingress must have a dest "
5740                                                   "QUEUE for Sample");
5741         } else if (attr->egress && !attr->transfer) {
5742                 return rte_flow_error_set(error, ENOTSUP,
5743                                           RTE_FLOW_ERROR_TYPE_ACTION,
5744                                           NULL,
5745                                           "Sample only supports Ingress "
5746                                           "or E-Switch");
5747         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5748                 MLX5_ASSERT(attr->transfer);
5749                 if (sample->ratio > 1)
5750                         return rte_flow_error_set(error, ENOTSUP,
5751                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5752                                                   NULL,
5753                                                   "E-Switch doesn't support "
5754                                                   "any optional action "
5755                                                   "for sampling");
5756                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5757                         return rte_flow_error_set(error, ENOTSUP,
5758                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5759                                                   NULL,
5760                                                   "unsupported action QUEUE");
5761                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5762                         return rte_flow_error_set(error, ENOTSUP,
5763                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5764                                                   NULL,
5765                                                   "unsupported action RSS");
5766                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5767                         return rte_flow_error_set(error, EINVAL,
5768                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5769                                                   NULL,
5770                                                   "E-Switch must have a dest "
5771                                                   "port for mirroring");
5772                 if (!priv->sh->cdev->config.hca_attr.reg_c_preserve &&
5773                      priv->representor_id != UINT16_MAX)
5774                         *fdb_mirror_limit = 1;
5775         }
5776         /* Continue validation for Xcap actions. */
5777         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5778             (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index))) {
5779                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5780                      MLX5_FLOW_XCAP_ACTIONS)
5781                         return rte_flow_error_set(error, ENOTSUP,
5782                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5783                                                   NULL, "encap and decap "
5784                                                   "combination isn't "
5785                                                   "supported");
5786                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5787                                                         MLX5_FLOW_ACTION_ENCAP))
5788                         return rte_flow_error_set(error, ENOTSUP,
5789                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5790                                                   NULL, "encap is not supported"
5791                                                   " for ingress traffic");
5792         }
5793         return 0;
5794 }
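
/*
 * Illustrative sketch (not part of the driver): an ingress sample action
 * mirroring half of the traffic to a queue; the ratio and queue index
 * are assumptions.
 *
 *	struct rte_flow_action_queue queue_conf = { .index = 0 };
 *	struct rte_flow_action sub_actions[] = {
 *		{
 *			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *			.conf = &queue_conf,
 *		},
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_sample sample_conf = {
 *		.ratio = 2,
 *		.actions = sub_actions,
 *	};
 *
 * On ingress the sub-action list must contain a QUEUE or RSS fate; on
 * E-Switch (transfer) a ratio above 1 allows no extra sub-actions and
 * mirroring (ratio 1) requires a destination port.
 */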
5795
5796 /**
5797  * Find existing modify-header resource or create and register a new one.
5798  *
5799  * @param[in, out] dev
5800  *   Pointer to rte_eth_dev structure.
5801  * @param[in, out] resource
5802  *   Pointer to modify-header resource.
5803  * @param[in, out] dev_flow
5804  *   Pointer to the dev_flow.
5805  * @param[out] error
5806  *   Pointer to error structure.
5807  *
5808  * @return
5809  *   0 on success, otherwise a negative errno value and rte_errno is set.
5810  */
5811 static int
5812 flow_dv_modify_hdr_resource_register
5813                         (struct rte_eth_dev *dev,
5814                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5815                          struct mlx5_flow *dev_flow,
5816                          struct rte_flow_error *error)
5817 {
5818         struct mlx5_priv *priv = dev->data->dev_private;
5819         struct mlx5_dev_ctx_shared *sh = priv->sh;
5820         uint32_t key_len = sizeof(*resource) -
5821                            offsetof(typeof(*resource), ft_type) +
5822                            resource->actions_num * sizeof(resource->actions[0]);
5823         struct mlx5_list_entry *entry;
5824         struct mlx5_flow_cb_ctx ctx = {
5825                 .error = error,
5826                 .data = resource,
5827         };
5828         struct mlx5_hlist *modify_cmds;
5829         uint64_t key64;
5830
5831         modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5832                                 "hdr_modify",
5833                                 MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5834                                 true, false, sh,
5835                                 flow_dv_modify_create_cb,
5836                                 flow_dv_modify_match_cb,
5837                                 flow_dv_modify_remove_cb,
5838                                 flow_dv_modify_clone_cb,
5839                                 flow_dv_modify_clone_free_cb,
5840                                 error);
5841         if (unlikely(!modify_cmds))
5842                 return -rte_errno;
5843         resource->root = !dev_flow->dv.group;
5844         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5845                                                                 resource->root))
5846                 return rte_flow_error_set(error, EOVERFLOW,
5847                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5848                                           "too many modify header items");
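        /* Hash over the same ft_type..actions region the match cb compares. */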
5849         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5850         entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5851         if (!entry)
5852                 return -rte_errno;
5853         resource = container_of(entry, typeof(*resource), entry);
5854         dev_flow->handle->dvh.modify_hdr = resource;
5855         return 0;
5856 }
5857
5858 /**
5859  * Get DV flow counter by index.
5860  *
5861  * @param[in] dev
5862  *   Pointer to the Ethernet device structure.
5863  * @param[in] idx
5864  *   mlx5 flow counter index in the container.
5865  * @param[out] ppool
5866  *   mlx5 flow counter pool in the container.
5867  *
5868  * @return
5869  *   Pointer to the counter, NULL otherwise.
5870  */
5871 static struct mlx5_flow_counter *
5872 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5873                            uint32_t idx,
5874                            struct mlx5_flow_counter_pool **ppool)
5875 {
5876         struct mlx5_priv *priv = dev->data->dev_private;
5877         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5878         struct mlx5_flow_counter_pool *pool;
5879
5880         /* Decrease to original index and clear shared bit. */
5881         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5882         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5883         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5884         MLX5_ASSERT(pool);
5885         if (ppool)
5886                 *ppool = pool;
5887         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5888 }
5889
5890 /**
5891  * Check the devx counter belongs to the pool.
5892  *
5893  * @param[in] pool
5894  *   Pointer to the counter pool.
5895  * @param[in] id
5896  *   The counter devx ID.
5897  *
5898  * @return
5899  *   True if counter belongs to the pool, false otherwise.
5900  */
5901 static bool
5902 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5903 {
5904         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5905                    MLX5_COUNTERS_PER_POOL;
5906
5907         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5908                 return true;
5909         return false;
5910 }
5911
5912 /**
5913  * Get a pool by devx counter ID.
5914  *
5915  * @param[in] cmng
5916  *   Pointer to the counter management.
5917  * @param[in] id
5918  *   The counter devx ID.
5919  *
5920  * @return
5921  *   The counter pool pointer if it exists, NULL otherwise.
5922  */
5923 static struct mlx5_flow_counter_pool *
5924 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5925 {
5926         uint32_t i;
5927         struct mlx5_flow_counter_pool *pool = NULL;
5928
5929         rte_spinlock_lock(&cmng->pool_update_sl);
5930         /* Check last used pool. */
5931         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5932             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5933                 pool = cmng->pools[cmng->last_pool_idx];
5934                 goto out;
5935         }
5936         /* ID out of range means no suitable pool in the container. */
5937         if (id > cmng->max_id || id < cmng->min_id)
5938                 goto out;
5939         /*
5940          * Find the pool from the end of the container, since counter IDs
5941          * are mostly sequentially increasing, so the last pool is likely
5942          * the needed one.
5943          */
5944         i = cmng->n_valid;
5945         while (i--) {
5946                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5947
5948                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5949                         pool = pool_tmp;
5950                         break;
5951                 }
5952         }
5953 out:
5954         rte_spinlock_unlock(&cmng->pool_update_sl);
5955         return pool;
5956 }
5957
5958 /**
5959  * Resize a counter container.
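 * The caller is expected to serialize resizes; in this file the resize
 * runs under cmng->pool_update_sl, taken in flow_dv_pool_create().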
5960  *
5961  * @param[in] dev
5962  *   Pointer to the Ethernet device structure.
5963  *
5964  * @return
5965  *   0 on success, otherwise negative errno value and rte_errno is set.
5966  */
5967 static int
5968 flow_dv_container_resize(struct rte_eth_dev *dev)
5969 {
5970         struct mlx5_priv *priv = dev->data->dev_private;
5971         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5972         void *old_pools = cmng->pools;
5973         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5974         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5975         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5976
5977         if (!pools) {
5978                 rte_errno = ENOMEM;
5979                 return -ENOMEM;
5980         }
5981         if (old_pools)
5982                 memcpy(pools, old_pools, cmng->n *
5983                                        sizeof(struct mlx5_flow_counter_pool *));
5984         cmng->n = resize;
5985         cmng->pools = pools;
5986         if (old_pools)
5987                 mlx5_free(old_pools);
5988         return 0;
5989 }
5990
5991 /**
5992  * Query a devx flow counter.
5993  *
5994  * @param[in] dev
5995  *   Pointer to the Ethernet device structure.
5996  * @param[in] counter
5997  *   Index to the flow counter.
5998  * @param[out] pkts
5999  *   The statistics value of packets.
6000  * @param[out] bytes
6001  *   The statistics value of bytes.
6002  *
6003  * @return
6004  *   0 on success, otherwise a negative errno value and rte_errno is set.
6005  */
6006 static inline int
6007 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
6008                      uint64_t *bytes)
6009 {
6010         struct mlx5_priv *priv = dev->data->dev_private;
6011         struct mlx5_flow_counter_pool *pool = NULL;
6012         struct mlx5_flow_counter *cnt;
6013         int offset;
6014
6015         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6016         MLX5_ASSERT(pool);
6017         if (priv->sh->cmng.counter_fallback)
6018                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
6019                                         0, pkts, bytes, 0, NULL, NULL, 0);
6020         rte_spinlock_lock(&pool->sl);
6021         if (!pool->raw) {
6022                 *pkts = 0;
6023                 *bytes = 0;
6024         } else {
6025                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
6026                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
6027                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
6028         }
6029         rte_spinlock_unlock(&pool->sl);
6030         return 0;
6031 }
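/*
 * Illustrative sketch (not part of the driver): a caller reading the
 * current hit count through the query helper above.  On failure the
 * helper sets rte_errno and the outputs are treated as zero here.
 */
static __rte_unused uint64_t
sketch_counter_hits(struct rte_eth_dev *dev, uint32_t cnt_idx)
{
        uint64_t pkts = 0;
        uint64_t bytes = 0;

        if (_flow_dv_query_count(dev, cnt_idx, &pkts, &bytes) < 0)
                return 0;
        return pkts;
}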
6032
6033 /**
6034  * Create and initialize a new counter pool.
6035  *
6036  * @param[in] dev
6037  *   Pointer to the Ethernet device structure.
6038  * @param[in] dcs
6039  *   The devX counter handle.
6040  * @param[in] age
6041  *   Whether the pool is for counters that were allocated for aging.
6044  *
6045  * @return
6046  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
6047  */
6048 static struct mlx5_flow_counter_pool *
6049 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
6050                     uint32_t age)
6051 {
6052         struct mlx5_priv *priv = dev->data->dev_private;
6053         struct mlx5_flow_counter_pool *pool;
6054         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6055         bool fallback = priv->sh->cmng.counter_fallback;
6056         uint32_t size = sizeof(*pool);
6057
6058         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
6059         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
6060         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
6061         if (!pool) {
6062                 rte_errno = ENOMEM;
6063                 return NULL;
6064         }
6065         pool->raw = NULL;
6066         pool->is_aged = !!age;
6067         pool->query_gen = 0;
6068         pool->min_dcs = dcs;
6069         rte_spinlock_init(&pool->sl);
6070         rte_spinlock_init(&pool->csl);
6071         TAILQ_INIT(&pool->counters[0]);
6072         TAILQ_INIT(&pool->counters[1]);
6073         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6074         rte_spinlock_lock(&cmng->pool_update_sl);
6075         pool->index = cmng->n_valid;
6076         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6077                 mlx5_free(pool);
6078                 rte_spinlock_unlock(&cmng->pool_update_sl);
6079                 return NULL;
6080         }
6081         cmng->pools[pool->index] = pool;
6082         cmng->n_valid++;
6083         if (unlikely(fallback)) {
6084                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6085
6086                 if (base < cmng->min_id)
6087                         cmng->min_id = base;
6088                 if (base > cmng->max_id)
6089                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6090                 cmng->last_pool_idx = pool->index;
6091         }
6092         rte_spinlock_unlock(&cmng->pool_update_sl);
6093         return pool;
6094 }
6095
6096 /**
6097  * Prepare a new counter and/or a new counter pool.
6098  *
6099  * @param[in] dev
6100  *   Pointer to the Ethernet device structure.
6101  * @param[out] cnt_free
6102  *   Where to put the pointer of a new counter.
6103  * @param[in] age
6104  *   Whether the pool is for counters that were allocated for aging.
6105  *
6106  * @return
6107  *   The counter pool pointer and @p cnt_free is set on success,
6108  *   NULL otherwise and rte_errno is set.
6109  */
6110 static struct mlx5_flow_counter_pool *
6111 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6112                              struct mlx5_flow_counter **cnt_free,
6113                              uint32_t age)
6114 {
6115         struct mlx5_priv *priv = dev->data->dev_private;
6116         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6117         struct mlx5_flow_counter_pool *pool;
6118         struct mlx5_counters tmp_tq;
6119         struct mlx5_devx_obj *dcs = NULL;
6120         struct mlx5_flow_counter *cnt;
6121         enum mlx5_counter_type cnt_type =
6122                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6123         bool fallback = priv->sh->cmng.counter_fallback;
6124         uint32_t i;
6125
6126         if (fallback) {
6127                 /* bulk_bitmap must be 0 for single counter allocation. */
6128                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
6129                 if (!dcs)
6130                         return NULL;
6131                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6132                 if (!pool) {
6133                         pool = flow_dv_pool_create(dev, dcs, age);
6134                         if (!pool) {
6135                                 mlx5_devx_cmd_destroy(dcs);
6136                                 return NULL;
6137                         }
6138                 }
6139                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6140                 cnt = MLX5_POOL_GET_CNT(pool, i);
6141                 cnt->pool = pool;
6142                 cnt->dcs_when_free = dcs;
6143                 *cnt_free = cnt;
6144                 return pool;
6145         }
6146         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
6147         if (!dcs) {
6148                 rte_errno = ENODATA;
6149                 return NULL;
6150         }
6151         pool = flow_dv_pool_create(dev, dcs, age);
6152         if (!pool) {
6153                 mlx5_devx_cmd_destroy(dcs);
6154                 return NULL;
6155         }
6156         TAILQ_INIT(&tmp_tq);
6157         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6158                 cnt = MLX5_POOL_GET_CNT(pool, i);
6159                 cnt->pool = pool;
6160                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6161         }
6162         rte_spinlock_lock(&cmng->csl[cnt_type]);
6163         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6164         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6165         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6166         (*cnt_free)->pool = pool;
6167         return pool;
6168 }
6169
6170 /**
6171  * Allocate a flow counter.
6172  *
6173  * @param[in] dev
6174  *   Pointer to the Ethernet device structure.
6175  * @param[in] age
6176  *   Whether the counter was allocated for aging.
6177  *
6178  * @return
6179  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6180  */
6181 static uint32_t
6182 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6183 {
6184         struct mlx5_priv *priv = dev->data->dev_private;
6185         struct mlx5_flow_counter_pool *pool = NULL;
6186         struct mlx5_flow_counter *cnt_free = NULL;
6187         bool fallback = priv->sh->cmng.counter_fallback;
6188         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6189         enum mlx5_counter_type cnt_type =
6190                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6191         uint32_t cnt_idx;
6192
6193         if (!priv->sh->cdev->config.devx) {
6194                 rte_errno = ENOTSUP;
6195                 return 0;
6196         }
6197         /* Get free counters from container. */
6198         rte_spinlock_lock(&cmng->csl[cnt_type]);
6199         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6200         if (cnt_free)
6201                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6202         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6203         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6204                 goto err;
6205         pool = cnt_free->pool;
6206         if (fallback)
6207                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6208         /* Create a DV counter action only on first-time usage. */
6209         if (!cnt_free->action) {
6210                 uint16_t offset;
6211                 struct mlx5_devx_obj *dcs;
6212                 int ret;
6213
6214                 if (!fallback) {
6215                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6216                         dcs = pool->min_dcs;
6217                 } else {
6218                         offset = 0;
6219                         dcs = cnt_free->dcs_when_free;
6220                 }
6221                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6222                                                             &cnt_free->action);
6223                 if (ret) {
6224                         rte_errno = errno;
6225                         goto err;
6226                 }
6227         }
6228         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6229                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6230         /* Update the counter reset values. */
6231         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6232                                  &cnt_free->bytes))
6233                 goto err;
6234         if (!fallback && !priv->sh->cmng.query_thread_on)
6235                 /* Start the asynchronous batch query by the host thread. */
6236                 mlx5_set_query_alarm(priv->sh);
6237         /*
6238          * When the count action is not shared (by ID), the shared_info
6239          * field holds the reference count of the indirect action API.
6240          * When the counter action is shared neither by ID nor by the
6241          * indirect action API, the reference count must be 1.
6242          */
6243         cnt_free->shared_info.refcnt = 1;
6244         return cnt_idx;
6245 err:
6246         if (cnt_free) {
6247                 cnt_free->pool = pool;
6248                 if (fallback)
6249                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6250                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6251                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6252                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6253         }
6254         return 0;
6255 }
6256
6257 /**
6258  * Get age param from counter index.
6259  *
6260  * @param[in] dev
6261  *   Pointer to the Ethernet device structure.
6262  * @param[in] counter
6263  *   Index to the counter handler.
6264  *
6265  * @return
6266  *   The aging parameter specified for the counter index.
6267  */
6268 static struct mlx5_age_param*
6269 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6270                                 uint32_t counter)
6271 {
6272         struct mlx5_flow_counter *cnt;
6273         struct mlx5_flow_counter_pool *pool = NULL;
6274
6275         flow_dv_counter_get_by_idx(dev, counter, &pool);
6276         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6277         cnt = MLX5_POOL_GET_CNT(pool, counter);
6278         return MLX5_CNT_TO_AGE(cnt);
6279 }
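/*
 * Illustrative sketch (not part of the driver): the index layout the
 * decode above relies on.  MLX5_MAKE_CNT_IDX() is assumed to encode
 * "pool_index * MLX5_COUNTERS_PER_POOL + offset + 1"; the "+ 1" keeps
 * index 0 free as the invalid-counter value checked in
 * flow_dv_counter_free() below.
 */
static __rte_unused void
sketch_decode_cnt_idx(uint32_t cnt_idx, uint32_t *pool_idx, uint32_t *offset)
{
        *pool_idx = (cnt_idx - 1) / MLX5_COUNTERS_PER_POOL;
        *offset = (cnt_idx - 1) % MLX5_COUNTERS_PER_POOL;
}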
6280
6281 /**
6282  * Remove a flow counter from aged counter list.
6283  *
6284  * @param[in] dev
6285  *   Pointer to the Ethernet device structure.
6286  * @param[in] counter
6287  *   Index to the counter handler.
6288  * @param[in] cnt
6289  *   Pointer to the counter handler.
6290  */
6291 static void
6292 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6293                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6294 {
6295         struct mlx5_age_info *age_info;
6296         struct mlx5_age_param *age_param;
6297         struct mlx5_priv *priv = dev->data->dev_private;
6298         uint16_t expected = AGE_CANDIDATE;
6299
6300         age_info = GET_PORT_AGE_INFO(priv);
6301         age_param = flow_dv_counter_idx_get_age(dev, counter);
6302         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6303                                          AGE_FREE, false, __ATOMIC_RELAXED,
6304                                          __ATOMIC_RELAXED)) {
6305                 /*
6306                  * The lock is needed even after the age timeout,
6307                  * since the counter may still be in use.
6308                  */
6309                 rte_spinlock_lock(&age_info->aged_sl);
6310                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6311                 rte_spinlock_unlock(&age_info->aged_sl);
6312                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6313         }
6314 }
6315
6316 /**
6317  * Release a flow counter.
6318  *
6319  * @param[in] dev
6320  *   Pointer to the Ethernet device structure.
6321  * @param[in] counter
6322  *   Index to the counter handler.
6323  */
6324 static void
6325 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6326 {
6327         struct mlx5_priv *priv = dev->data->dev_private;
6328         struct mlx5_flow_counter_pool *pool = NULL;
6329         struct mlx5_flow_counter *cnt;
6330         enum mlx5_counter_type cnt_type;
6331
6332         if (!counter)
6333                 return;
6334         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6335         MLX5_ASSERT(pool);
6336         if (pool->is_aged) {
6337                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6338         } else {
6339                 /*
6340                  * If the counter action is shared by the indirect action
6341                  * API, the atomic operation decrements its reference count.
6342                  * If the action is still referenced after the decrement,
6343                  * the function returns here without releasing it.
6344                  * When the counter action is not shared by the indirect
6345                  * action API, the reference count is 1 before the decrement,
6346                  * so the condition fails and the counter is released below.
6347                  */
6348                 if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6349                                        __ATOMIC_RELAXED))
6350                         return;
6351         }
6352         cnt->pool = pool;
6353         /*
6354          * Put the counter back into a list to be updated in non-fallback
6355          * mode. Two lists are used alternately: while one is being queried,
6356          * freed counters are added to the other, selected by the pool's
6357          * query_gen value. After a query finishes, its list is merged into
6358          * the global container counter list. The lists are swapped when a
6359          * query starts, so no lock is needed: the query callback and this
6360          * release function operate on different lists.
6361          */
6362         if (!priv->sh->cmng.counter_fallback) {
6363                 rte_spinlock_lock(&pool->csl);
6364                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6365                 rte_spinlock_unlock(&pool->csl);
6366         } else {
6367                 cnt->dcs_when_free = cnt->dcs_when_active;
6368                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6369                                            MLX5_COUNTER_TYPE_ORIGIN;
6370                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6371                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6372                                   cnt, next);
6373                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6374         }
6375 }
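/*
 * Illustrative sketch (not part of the driver): the alloc/query/free
 * lifecycle of a counter index around the helpers above.  Index 0 is
 * the reserved invalid value, so the allocator's result can be tested
 * directly.
 */
static __rte_unused void
sketch_counter_lifecycle(struct rte_eth_dev *dev)
{
        uint64_t pkts = 0;
        uint64_t bytes = 0;
        /* A non-zero second argument allocates from the aging pools. */
        uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);

        if (!cnt_idx)
                return; /* rte_errno was set by the allocator. */
        (void)_flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
        flow_dv_counter_free(dev, cnt_idx);
}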
6376
6377 /**
6378  * Resize a meter id container.
6379  *
6380  * @param[in] dev
6381  *   Pointer to the Ethernet device structure.
6382  *
6383  * @return
6384  *   0 on success, otherwise negative errno value and rte_errno is set.
6385  */
6386 static int
6387 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6388 {
6389         struct mlx5_priv *priv = dev->data->dev_private;
6390         struct mlx5_aso_mtr_pools_mng *pools_mng =
6391                                 &priv->sh->mtrmng->pools_mng;
6392         void *old_pools = pools_mng->pools;
6393         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6394         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6395         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6396
6397         if (!pools) {
6398                 rte_errno = ENOMEM;
6399                 return -ENOMEM;
6400         }
6401         if (!pools_mng->n &&
6402             mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6403                 mlx5_free(pools);
6404                 return -ENOMEM;
6405         }
6406         if (old_pools)
6407                 memcpy(pools, old_pools, pools_mng->n *
6408                                        sizeof(struct mlx5_aso_mtr_pool *));
6409         pools_mng->n = resize;
6410         pools_mng->pools = pools;
6411         if (old_pools)
6412                 mlx5_free(old_pools);
6413         return 0;
6414 }
6415
6416 /**
6417  * Prepare a new meter and/or a new meter pool.
6418  *
6419  * @param[in] dev
6420  *   Pointer to the Ethernet device structure.
6421  * @param[out] mtr_free
6422  *   Where to put the pointer of a new meter.
6423  *
6424  * @return
6425  *   The meter pool pointer and @p mtr_free is set on success,
6426  *   NULL otherwise and rte_errno is set.
6427  */
6428 static struct mlx5_aso_mtr_pool *
6429 flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
6430 {
6431         struct mlx5_priv *priv = dev->data->dev_private;
6432         struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
6433         struct mlx5_aso_mtr_pool *pool = NULL;
6434         struct mlx5_devx_obj *dcs = NULL;
6435         uint32_t i;
6436         uint32_t log_obj_size;
6437
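        /*
         * Editor's note (assumption): one ASO flow meter object hosts
         * two meters, so only MLX5_ASO_MTRS_PER_POOL / 2 objects are
         * requested for a full pool.
         */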
6438         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6439         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
6440                                                       priv->sh->cdev->pdn,
6441                                                       log_obj_size);
6442         if (!dcs) {
6443                 rte_errno = ENODATA;
6444                 return NULL;
6445         }
6446         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6447         if (!pool) {
6448                 rte_errno = ENOMEM;
6449                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6450                 return NULL;
6451         }
6452         pool->devx_obj = dcs;
6453         rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
6454         pool->index = pools_mng->n_valid;
6455         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6456                 mlx5_free(pool);
6457                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6458                 rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6459                 return NULL;
6460         }
6461         pools_mng->pools[pool->index] = pool;
6462         pools_mng->n_valid++;
6463         rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6464         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6465                 pool->mtrs[i].offset = i;
6466                 LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
6467         }
6468         pool->mtrs[0].offset = 0;
6469         *mtr_free = &pool->mtrs[0];
6470         return pool;
6471 }
6472
6473 /**
6474  * Release a flow meter back to the pool.
6475  *
6476  * @param[in] dev
6477  *   Pointer to the Ethernet device structure.
6478  * @param[in] mtr_idx
6479  *   Index of the ASO flow meter.
6480  */
6481 static void
6482 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6483 {
6484         struct mlx5_priv *priv = dev->data->dev_private;
6485         struct mlx5_aso_mtr_pools_mng *pools_mng =
6486                                 &priv->sh->mtrmng->pools_mng;
6487         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6488
6489         MLX5_ASSERT(aso_mtr);
6490         rte_spinlock_lock(&pools_mng->mtrsl);
6491         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6492         aso_mtr->state = ASO_METER_FREE;
6493         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6494         rte_spinlock_unlock(&pools_mng->mtrsl);
6495 }
6496
6497 /**
6498  * Allocate an ASO flow meter.
6499  *
6500  * @param[in] dev
6501  *   Pointer to the Ethernet device structure.
6502  *
6503  * @return
6504  *   Index of the ASO flow meter on success, 0 otherwise and rte_errno is set.
6505  */
6506 static uint32_t
6507 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6508 {
6509         struct mlx5_priv *priv = dev->data->dev_private;
6510         struct mlx5_aso_mtr *mtr_free = NULL;
6511         struct mlx5_aso_mtr_pools_mng *pools_mng =
6512                                 &priv->sh->mtrmng->pools_mng;
6513         struct mlx5_aso_mtr_pool *pool;
6514         uint32_t mtr_idx = 0;
6515
6516         if (!priv->sh->cdev->config.devx) {
6517                 rte_errno = ENOTSUP;
6518                 return 0;
6519         }
6520         /* Get a free meter from the pool management list. */
6522         rte_spinlock_lock(&pools_mng->mtrsl);
6523         mtr_free = LIST_FIRST(&pools_mng->meters);
6524         if (mtr_free)
6525                 LIST_REMOVE(mtr_free, next);
6526         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6527                 rte_spinlock_unlock(&pools_mng->mtrsl);
6528                 return 0;
6529         }
6530         mtr_free->state = ASO_METER_WAIT;
6531         rte_spinlock_unlock(&pools_mng->mtrsl);
6532         pool = container_of(mtr_free,
6533                         struct mlx5_aso_mtr_pool,
6534                         mtrs[mtr_free->offset]);
6535         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6536         if (!mtr_free->fm.meter_action) {
6537 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6538                 struct rte_flow_error error;
6539                 uint8_t reg_id;
6540
6541                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6542                 mtr_free->fm.meter_action =
6543                         mlx5_glue->dv_create_flow_action_aso
6544                                                 (priv->sh->rx_domain,
6545                                                  pool->devx_obj->obj,
6546                                                  mtr_free->offset,
6547                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6548                                                  reg_id - REG_C_0);
6549 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6550                 if (!mtr_free->fm.meter_action) {
6551                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6552                         return 0;
6553                 }
6554         }
6555         return mtr_idx;
6556 }
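/*
 * Illustrative sketch (not part of the driver): pairing the ASO meter
 * allocator above with its pool release helper.
 */
static __rte_unused void
sketch_mtr_lifecycle(struct rte_eth_dev *dev)
{
        uint32_t mtr_idx = flow_dv_mtr_alloc(dev);

        if (!mtr_idx)
                return; /* rte_errno was set by the allocator. */
        /* Configure the meter via mlx5_aso_meter_by_idx() here. */
        flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
}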
6557
6558 /**
6559  * Verify that the @p attributes will be correctly understood by the NIC
6560  * and that they are valid for the translated flow group.
6561  *
6562  * @param[in] dev
6563  *   Pointer to the Ethernet device structure.
6564  * @param[in] attributes
6565  *   Pointer to the flow attributes.
6566  * @param[in] grp_info
6567  *   Pointer to the flow group translation information.
6568  * @param[out] error
6569  *   Pointer to error structure.
6570  *
6571  * @return
6572  *   - 0 on success and a non-root table.
6573  *   - 1 on success and a root table.
6574  *   - a negative errno value otherwise and rte_errno is set.
6575  */
6576 static int
6577 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6578                             const struct mlx5_flow_tunnel *tunnel,
6579                             const struct rte_flow_attr *attributes,
6580                             const struct flow_grp_info *grp_info,
6581                             struct rte_flow_error *error)
6582 {
6583         struct mlx5_priv *priv = dev->data->dev_private;
6584         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6585         int ret = 0;
6586
6587 #ifndef HAVE_MLX5DV_DR
6588         RTE_SET_USED(tunnel);
6589         RTE_SET_USED(grp_info);
6590         if (attributes->group)
6591                 return rte_flow_error_set(error, ENOTSUP,
6592                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6593                                           NULL,
6594                                           "groups are not supported");
6595 #else
6596         uint32_t table = 0;
6597
6598         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6599                                        grp_info, error);
6600         if (ret)
6601                 return ret;
6602         if (!table)
6603                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6604 #endif
6605         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6606             attributes->priority > lowest_priority)
6607                 return rte_flow_error_set(error, ENOTSUP,
6608                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6609                                           NULL,
6610                                           "priority out of range");
6611         if (attributes->transfer) {
6612                 if (!priv->sh->config.dv_esw_en)
6613                         return rte_flow_error_set
6614                                 (error, ENOTSUP,
6615                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6616                                  "E-Switch dr is not supported");
6617                 if (attributes->egress)
6618                         return rte_flow_error_set
6619                                 (error, ENOTSUP,
6620                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6621                                  "egress is not supported");
6622         }
6623         if (!(attributes->egress ^ attributes->ingress))
6624                 return rte_flow_error_set(error, ENOTSUP,
6625                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6626                                           "must specify exactly one of "
6627                                           "ingress or egress");
6628         return ret;
6629 }
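/*
 * Illustrative sketch (not part of the driver): attributes that pass
 * the validation above — exactly one of ingress/egress is set, the
 * priority is in range, and transfer is left clear unless E-Switch DV
 * mode is enabled.
 */
static const struct rte_flow_attr sketch_flow_attr __rte_unused = {
        .group = 1,     /* Non-root table when HAVE_MLX5DV_DR is set. */
        .priority = 0,
        .ingress = 1,
};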
6630
6631 static int
6632 validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6633                         int64_t pattern_flags, uint64_t l3_flags,
6634                         uint64_t l4_flags, uint64_t ip4_flag,
6635                         struct rte_flow_error *error)
6636 {
6637         if (mask->l3_ok && !(pattern_flags & l3_flags))
6638                 return rte_flow_error_set(error, EINVAL,
6639                                           RTE_FLOW_ERROR_TYPE_ITEM,
6640                                           NULL, "missing L3 protocol");
6641
6642         if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6643                 return rte_flow_error_set(error, EINVAL,
6644                                           RTE_FLOW_ERROR_TYPE_ITEM,
6645                                           NULL, "missing IPv4 protocol");
6646
6647         if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6648                 return rte_flow_error_set(error, EINVAL,
6649                                           RTE_FLOW_ERROR_TYPE_ITEM,
6650                                           NULL, "missing L4 protocol");
6651
6652         return 0;
6653 }
6654
6655 static int
6656 flow_dv_validate_item_integrity_post(const struct
6657                                      rte_flow_item *integrity_items[2],
6658                                      int64_t pattern_flags,
6659                                      struct rte_flow_error *error)
6660 {
6661         const struct rte_flow_item_integrity *mask;
6662         int ret;
6663
6664         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6665                 mask = (typeof(mask))integrity_items[0]->mask;
6666                 ret = validate_integrity_bits(mask, pattern_flags,
6667                                               MLX5_FLOW_LAYER_OUTER_L3,
6668                                               MLX5_FLOW_LAYER_OUTER_L4,
6669                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6670                                               error);
6671                 if (ret)
6672                         return ret;
6673         }
6674         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6675                 mask = (typeof(mask))integrity_items[1]->mask;
6676                 ret = validate_integrity_bits(mask, pattern_flags,
6677                                               MLX5_FLOW_LAYER_INNER_L3,
6678                                               MLX5_FLOW_LAYER_INNER_L4,
6679                                               MLX5_FLOW_LAYER_INNER_L3_IPV4,
6680                                               error);
6681                 if (ret)
6682                         return ret;
6683         }
6684         return 0;
6685 }
6686
6687 static int
6688 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6689                                 const struct rte_flow_item *integrity_item,
6690                                 uint64_t pattern_flags, uint64_t *last_item,
6691                                 const struct rte_flow_item *integrity_items[2],
6692                                 struct rte_flow_error *error)
6693 {
6694         struct mlx5_priv *priv = dev->data->dev_private;
6695         const struct rte_flow_item_integrity *mask = (typeof(mask))
6696                                                      integrity_item->mask;
6697         const struct rte_flow_item_integrity *spec = (typeof(spec))
6698                                                      integrity_item->spec;
6699
6700         if (!priv->sh->cdev->config.hca_attr.pkt_integrity_match)
6701                 return rte_flow_error_set(error, ENOTSUP,
6702                                           RTE_FLOW_ERROR_TYPE_ITEM,
6703                                           integrity_item,
6704                                           "packet integrity match is not supported");
6705         if (!spec)
6706                 return rte_flow_error_set(error, ENOTSUP,
6707                                           RTE_FLOW_ERROR_TYPE_ITEM,
6708                                           integrity_item,
6709                                           "no spec for integrity item");
6710         if (!mask)
6711                 mask = &rte_flow_item_integrity_mask;
6712         if (!mlx5_validate_integrity_item(mask))
6713                 return rte_flow_error_set(error, ENOTSUP,
6714                                           RTE_FLOW_ERROR_TYPE_ITEM,
6715                                           integrity_item,
6716                                           "unsupported integrity filter");
6717         if (spec->level > 1) {
6718                 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
6719                         return rte_flow_error_set
6720                                 (error, ENOTSUP,
6721                                  RTE_FLOW_ERROR_TYPE_ITEM,
6722                                  NULL, "multiple inner integrity items not supported");
6723                 integrity_items[1] = integrity_item;
6724                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
6725         } else {
6726                 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
6727                         return rte_flow_error_set
6728                                 (error, ENOTSUP,
6729                                  RTE_FLOW_ERROR_TYPE_ITEM,
6730                                  NULL, "multiple outer integrity items not supported");
6731                 integrity_items[0] = integrity_item;
6732                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
6733         }
6734         return 0;
6735 }
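/*
 * Illustrative sketch (not part of the driver): an integrity match that
 * satisfies the validators above.  The spec is mandatory, level <= 1
 * selects the outer headers, and the l3/l4 bits are accepted only when
 * the same pattern carries explicit L3/L4 items, as checked by
 * flow_dv_validate_item_integrity_post().
 */
static const struct rte_flow_item_integrity sketch_integrity __rte_unused = {
        .level = 0,             /* Outer packet headers. */
        .l3_ok = 1,
        .l4_ok = 1,
        .ipv4_csum_ok = 1,
};

static const struct rte_flow_item sketch_integrity_pattern[] __rte_unused = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_TCP },
        {
                .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
                .spec = &sketch_integrity,
                .mask = &sketch_integrity,
        },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};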
6736
6737 static int
6738 flow_dv_validate_item_flex(struct rte_eth_dev *dev,
6739                            const struct rte_flow_item *item,
6740                            uint64_t item_flags,
6741                            uint64_t *last_item,
6742                            bool is_inner,
6743                            struct rte_flow_error *error)
6744 {
6745         const struct rte_flow_item_flex *flow_spec = item->spec;
6746         const struct rte_flow_item_flex *flow_mask = item->mask;
6747         struct mlx5_flex_item *flex;
6748
6749         if (!flow_spec)
6750                 return rte_flow_error_set(error, EINVAL,
6751                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6752                                           "flex flow item spec cannot be NULL");
6753         if (!flow_mask)
6754                 return rte_flow_error_set(error, EINVAL,
6755                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6756                                           "flex flow item mask cannot be NULL");
6757         if (item->last)
6758                 return rte_flow_error_set(error, ENOTSUP,
6759                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6760                                           "flex flow item last not supported");
6761         if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
6762                 return rte_flow_error_set(error, EINVAL,
6763                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6764                                           "invalid flex flow item handle");
6765         flex = (struct mlx5_flex_item *)flow_spec->handle;
6766         switch (flex->tunnel_mode) {
6767         case FLEX_TUNNEL_MODE_SINGLE:
6768                 if (item_flags &
6769                     (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
6770                         return rte_flow_error_set(error, EINVAL,
6771                                            RTE_FLOW_ERROR_TYPE_ITEM,
6772                                            NULL, "multiple flex items not supported");
6773                 break;
6774         case FLEX_TUNNEL_MODE_OUTER:
6775                 if (is_inner)
6776                         return rte_flow_error_set(error, EINVAL,
6777                                            RTE_FLOW_ERROR_TYPE_ITEM,
6778                                            NULL, "inner flex item was not configured");
6779                 if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
6780                         return rte_flow_error_set(error, ENOTSUP,
6781                                            RTE_FLOW_ERROR_TYPE_ITEM,
6782                                            NULL, "multiple flex items not supported");
6783                 break;
6784         case FLEX_TUNNEL_MODE_INNER:
6785                 if (!is_inner)
6786                         return rte_flow_error_set(error, EINVAL,
6787                                            RTE_FLOW_ERROR_TYPE_ITEM,
6788                                            NULL, "outer flex item was not configured");
6789                 if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
6790                         return rte_flow_error_set(error, EINVAL,
6791                                            RTE_FLOW_ERROR_TYPE_ITEM,
6792                                            NULL, "multiple flex items not supported");
6793                 break;
6794         case FLEX_TUNNEL_MODE_MULTI:
6795                 if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
6796                     (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
6797                         return rte_flow_error_set(error, EINVAL,
6798                                            RTE_FLOW_ERROR_TYPE_ITEM,
6799                                            NULL, "multiple flex items not supported");
6800                 }
6801                 break;
6802         case FLEX_TUNNEL_MODE_TUNNEL:
6803                 if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
6804                         return rte_flow_error_set(error, EINVAL,
6805                                            RTE_FLOW_ERROR_TYPE_ITEM,
6806                                            NULL, "multiple flex tunnel items not supported");
6807                 break;
6808         default:
6809                 return rte_flow_error_set(error, EINVAL,
6810                                    RTE_FLOW_ERROR_TYPE_ITEM,
6811                                    NULL, "invalid flex item configuration");
6812         }
6813         *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
6814                      MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
6815                      MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
6816         return 0;
6817 }
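/*
 * Illustrative sketch (not part of the driver): assembling a flex item
 * that the validator above accepts — spec and mask both present, no
 * "last" range, and a handle previously returned by
 * rte_flow_flex_item_create().
 */
static __rte_unused void
sketch_fill_flex_item(struct rte_flow_item *item,
                      const struct rte_flow_item_flex *spec,
                      const struct rte_flow_item_flex *mask)
{
        item->type = RTE_FLOW_ITEM_TYPE_FLEX;
        item->spec = spec;      /* spec->handle identifies the parsed protocol. */
        item->mask = mask;
        item->last = NULL;      /* Ranges are rejected for flex items. */
}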
6818
6819 /**
6820  * Internal validation function. For validating both actions and items.
6821  *
6822  * @param[in] dev
6823  *   Pointer to the rte_eth_dev structure.
6824  * @param[in] attr
6825  *   Pointer to the flow attributes.
6826  * @param[in] items
6827  *   Pointer to the list of items.
6828  * @param[in] actions
6829  *   Pointer to the list of actions.
6830  * @param[in] external
6831  *   This flow rule is created by request external to PMD.
6832  * @param[in] hairpin
6833  *   Number of hairpin TX actions, 0 means classic flow.
6834  * @param[out] error
6835  *   Pointer to the error structure.
6836  *
6837  * @return
6838  *   0 on success, a negative errno value otherwise and rte_errno is set.
6839  */
6840 static int
6841 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6842                  const struct rte_flow_item items[],
6843                  const struct rte_flow_action actions[],
6844                  bool external, int hairpin, struct rte_flow_error *error)
6845 {
6846         int ret;
6847         uint64_t aso_mask, action_flags = 0;
6848         uint64_t item_flags = 0;
6849         uint64_t last_item = 0;
6850         uint8_t next_protocol = 0xff;
6851         uint16_t ether_type = 0;
6852         int actions_n = 0;
6853         uint8_t item_ipv6_proto = 0;
6854         int fdb_mirror_limit = 0;
6855         int modify_after_mirror = 0;
6856         const struct rte_flow_item *geneve_item = NULL;
6857         const struct rte_flow_item *gre_item = NULL;
6858         const struct rte_flow_item *gtp_item = NULL;
6859         const struct rte_flow_action_raw_decap *decap;
6860         const struct rte_flow_action_raw_encap *encap;
6861         const struct rte_flow_action_rss *rss = NULL;
6862         const struct rte_flow_action_rss *sample_rss = NULL;
6863         const struct rte_flow_action_count *sample_count = NULL;
6864         const struct rte_flow_item_tcp nic_tcp_mask = {
6865                 .hdr = {
6866                         .tcp_flags = 0xFF,
6867                         .src_port = RTE_BE16(UINT16_MAX),
6868                         .dst_port = RTE_BE16(UINT16_MAX),
6869                 }
6870         };
6871         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6872                 .hdr = {
6873                         .src_addr =
6874                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6875                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6876                         .dst_addr =
6877                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6878                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6879                         .vtc_flow = RTE_BE32(0xffffffff),
6880                         .proto = 0xff,
6881                         .hop_limits = 0xff,
6882                 },
6883                 .has_frag_ext = 1,
6884         };
6885         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6886                 .hdr = {
6887                         .common = {
6888                                 .u32 =
6889                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6890                                         .type = 0xFF,
6891                                         }).u32),
6892                         },
6893                         .dummy[0] = 0xffffffff,
6894                 },
6895         };
6896         struct mlx5_priv *priv = dev->data->dev_private;
6897         struct mlx5_sh_config *dev_conf = &priv->sh->config;
6898         uint16_t queue_index = 0xFFFF;
6899         const struct rte_flow_item_vlan *vlan_m = NULL;
6900         uint32_t rw_act_num = 0;
6901         uint64_t is_root;
6902         const struct mlx5_flow_tunnel *tunnel;
6903         enum mlx5_tof_rule_type tof_rule_type;
6904         struct flow_grp_info grp_info = {
6905                 .external = !!external,
6906                 .transfer = !!attr->transfer,
6907                 .fdb_def_rule = !!priv->fdb_def_rule,
6908                 .std_tbl_fix = true,
6909         };
6910         const struct rte_eth_hairpin_conf *conf;
6911         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6912         const struct rte_flow_item *port_id_item = NULL;
6913         bool def_policy = false;
6914         bool shared_count = false;
6915         uint16_t udp_dport = 0;
6916         uint32_t tag_id = 0;
6917         const struct rte_flow_action_age *non_shared_age = NULL;
6918         const struct rte_flow_action_count *count = NULL;
6919
6920         if (items == NULL)
6921                 return -1;
6922         tunnel = is_tunnel_offload_active(dev) ?
6923                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6924         if (tunnel) {
6925                 if (!dev_conf->dv_flow_en)
6926                         return rte_flow_error_set
6927                                 (error, ENOTSUP,
6928                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6929                                  NULL, "tunnel offload requires DV flow interface");
6930                 if (priv->representor)
6931                         return rte_flow_error_set
6932                                 (error, ENOTSUP,
6933                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6934                                  NULL, "decap not supported for VF representor");
6935                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6936                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6937                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6938                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6939                                         MLX5_FLOW_ACTION_DECAP;
6940                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6941                                         (dev, attr, tunnel, tof_rule_type);
6942         }
6943         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6944         if (ret < 0)
6945                 return ret;
6946         is_root = (uint64_t)ret;
6947         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6948                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6949                 int type = items->type;
6950
6951                 if (!mlx5_flow_os_item_supported(type))
6952                         return rte_flow_error_set(error, ENOTSUP,
6953                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6954                                                   NULL, "item not supported");
6955                 switch (type) {
6956                 case RTE_FLOW_ITEM_TYPE_VOID:
6957                         break;
6958                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6959                         ret = flow_dv_validate_item_port_id
6960                                         (dev, items, attr, item_flags, error);
6961                         if (ret < 0)
6962                                 return ret;
6963                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6964                         port_id_item = items;
6965                         break;
6966                 case RTE_FLOW_ITEM_TYPE_ETH:
6967                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6968                                                           true, error);
6969                         if (ret < 0)
6970                                 return ret;
6971                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6972                                              MLX5_FLOW_LAYER_OUTER_L2;
6973                         if (items->mask != NULL && items->spec != NULL) {
6974                                 ether_type =
6975                                         ((const struct rte_flow_item_eth *)
6976                                          items->spec)->type;
6977                                 ether_type &=
6978                                         ((const struct rte_flow_item_eth *)
6979                                          items->mask)->type;
6980                                 ether_type = rte_be_to_cpu_16(ether_type);
6981                         } else {
6982                                 ether_type = 0;
6983                         }
6984                         break;
6985                 case RTE_FLOW_ITEM_TYPE_VLAN:
6986                         ret = flow_dv_validate_item_vlan(items, item_flags,
6987                                                          dev, error);
6988                         if (ret < 0)
6989                                 return ret;
6990                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6991                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6992                         if (items->mask != NULL && items->spec != NULL) {
6993                                 ether_type =
6994                                         ((const struct rte_flow_item_vlan *)
6995                                          items->spec)->inner_type;
6996                                 ether_type &=
6997                                         ((const struct rte_flow_item_vlan *)
6998                                          items->mask)->inner_type;
6999                                 ether_type = rte_be_to_cpu_16(ether_type);
7000                         } else {
7001                                 ether_type = 0;
7002                         }
7003                         /* Store outer VLAN mask for of_push_vlan action. */
7004                         if (!tunnel)
7005                                 vlan_m = items->mask;
7006                         break;
7007                 case RTE_FLOW_ITEM_TYPE_IPV4:
7008                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7009                                                   &item_flags, &tunnel);
7010                         ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
7011                                                          last_item, ether_type,
7012                                                          error);
7013                         if (ret < 0)
7014                                 return ret;
7015                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7016                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7017                         if (items->mask != NULL &&
7018                             ((const struct rte_flow_item_ipv4 *)
7019                              items->mask)->hdr.next_proto_id) {
7020                                 next_protocol =
7021                                         ((const struct rte_flow_item_ipv4 *)
7022                                          (items->spec))->hdr.next_proto_id;
7023                                 next_protocol &=
7024                                         ((const struct rte_flow_item_ipv4 *)
7025                                          (items->mask))->hdr.next_proto_id;
7026                         } else {
7027                                 /* Reset for inner layer. */
7028                                 next_protocol = 0xff;
7029                         }
7030                         break;
7031                 case RTE_FLOW_ITEM_TYPE_IPV6:
7032                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7033                                                   &item_flags, &tunnel);
7034                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
7035                                                            last_item,
7036                                                            ether_type,
7037                                                            &nic_ipv6_mask,
7038                                                            error);
7039                         if (ret < 0)
7040                                 return ret;
7041                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7042                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7043                         if (items->mask != NULL &&
7044                             ((const struct rte_flow_item_ipv6 *)
7045                              items->mask)->hdr.proto) {
7046                                 item_ipv6_proto =
7047                                         ((const struct rte_flow_item_ipv6 *)
7048                                          items->spec)->hdr.proto;
7049                                 next_protocol =
7050                                         ((const struct rte_flow_item_ipv6 *)
7051                                          items->spec)->hdr.proto;
7052                                 next_protocol &=
7053                                         ((const struct rte_flow_item_ipv6 *)
7054                                          items->mask)->hdr.proto;
7055                         } else {
7056                                 /* Reset for inner layer. */
7057                                 next_protocol = 0xff;
7058                         }
7059                         break;
7060                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7061                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
7062                                                                   item_flags,
7063                                                                   error);
7064                         if (ret < 0)
7065                                 return ret;
7066                         last_item = tunnel ?
7067                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7068                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7069                         if (items->mask != NULL &&
7070                             ((const struct rte_flow_item_ipv6_frag_ext *)
7071                              items->mask)->hdr.next_header) {
7072                                 next_protocol =
7073                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7074                                  items->spec)->hdr.next_header;
7075                                 next_protocol &=
7076                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7077                                  items->mask)->hdr.next_header;
7078                         } else {
7079                                 /* Reset for inner layer. */
7080                                 next_protocol = 0xff;
7081                         }
7082                         break;
7083                 case RTE_FLOW_ITEM_TYPE_TCP:
7084                         ret = mlx5_flow_validate_item_tcp
7085                                                 (items, item_flags,
7086                                                  next_protocol,
7087                                                  &nic_tcp_mask,
7088                                                  error);
7089                         if (ret < 0)
7090                                 return ret;
7091                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7092                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7093                         break;
7094                 case RTE_FLOW_ITEM_TYPE_UDP:
7095                         ret = mlx5_flow_validate_item_udp(items, item_flags,
7096                                                           next_protocol,
7097                                                           error);
7098                         if (ret < 0)
7099                                 return ret;
7100                         const struct rte_flow_item_udp *spec = items->spec;
7101                         const struct rte_flow_item_udp *mask = items->mask;
7102                         if (!mask)
7103                                 mask = &rte_flow_item_udp_mask;
7104                         if (spec != NULL)
7105                                 udp_dport = rte_be_to_cpu_16
7106                                                 (spec->hdr.dst_port &
7107                                                  mask->hdr.dst_port);
7108                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7109                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7110                         break;
7111                 case RTE_FLOW_ITEM_TYPE_GRE:
7112                         ret = mlx5_flow_validate_item_gre(items, item_flags,
7113                                                           next_protocol, error);
7114                         if (ret < 0)
7115                                 return ret;
7116                         gre_item = items;
7117                         last_item = MLX5_FLOW_LAYER_GRE;
7118                         break;
7119                 case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
7120                         ret = mlx5_flow_validate_item_gre_option(dev, items, item_flags,
7121                                                           attr, gre_item, error);
7122                         if (ret < 0)
7123                                 return ret;
7124                         last_item = MLX5_FLOW_LAYER_GRE;
7125                         break;
7126                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7127                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7128                                                             next_protocol,
7129                                                             error);
7130                         if (ret < 0)
7131                                 return ret;
7132                         last_item = MLX5_FLOW_LAYER_NVGRE;
7133                         break;
7134                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7135                         ret = mlx5_flow_validate_item_gre_key
7136                                 (items, item_flags, gre_item, error);
7137                         if (ret < 0)
7138                                 return ret;
7139                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7140                         break;
7141                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7142                         ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
7143                                                             items, item_flags,
7144                                                             attr, error);
7145                         if (ret < 0)
7146                                 return ret;
7147                         last_item = MLX5_FLOW_LAYER_VXLAN;
7148                         break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
                                                                item_flags, dev,
                                                                error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                        ret = mlx5_flow_validate_item_geneve(items,
                                                             item_flags, dev,
                                                             error);
                        if (ret < 0)
                                return ret;
                        geneve_item = items;
                        last_item = MLX5_FLOW_LAYER_GENEVE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
                        ret = mlx5_flow_validate_item_geneve_opt(items,
                                                                 last_item,
                                                                 geneve_item,
                                                                 dev,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        ret = mlx5_flow_validate_item_mpls(dev, items,
                                                           item_flags,
                                                           last_item, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_MPLS;
                        break;
                case RTE_FLOW_ITEM_TYPE_MARK:
                        ret = flow_dv_validate_item_mark(dev, items, attr,
                                                         error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_MARK;
                        break;
                case RTE_FLOW_ITEM_TYPE_META:
                        ret = flow_dv_validate_item_meta(dev, items, attr,
                                                         error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_METADATA;
                        break;
                case RTE_FLOW_ITEM_TYPE_ICMP:
                        ret = mlx5_flow_validate_item_icmp(items, item_flags,
                                                           next_protocol,
                                                           error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_ICMP;
                        break;
                case RTE_FLOW_ITEM_TYPE_ICMP6:
                        ret = mlx5_flow_validate_item_icmp6(items, item_flags,
                                                            next_protocol,
                                                            error);
                        if (ret < 0)
                                return ret;
                        item_ipv6_proto = IPPROTO_ICMPV6;
                        last_item = MLX5_FLOW_LAYER_ICMP6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TAG:
                        ret = flow_dv_validate_item_tag(dev, items,
                                                        attr, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_TAG;
                        break;
                case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
                        last_item = MLX5_FLOW_ITEM_TX_QUEUE;
                        break;
                case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
                        break;
                case RTE_FLOW_ITEM_TYPE_GTP:
                        ret = flow_dv_validate_item_gtp(dev, items, item_flags,
                                                        error);
                        if (ret < 0)
                                return ret;
                        gtp_item = items;
                        last_item = MLX5_FLOW_LAYER_GTP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
                        ret = flow_dv_validate_item_gtp_psc(items, last_item,
                                                            gtp_item, attr,
                                                            error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_GTP_PSC;
                        break;
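                /*
                 * GTP_PSC is the PDU session container extension of GTP, so
                 * it is validated against the preceding GTP item recorded in
                 * gtp_item above. Illustrative testpmd sketch (field names
                 * are examples, not a tested command):
                 *
                 *   flow create 0 ingress
                 *        pattern eth / ipv4 / udp / gtp / gtp_psc qfi is 9 / end
                 *        actions queue index 0 / end
                 */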
                case RTE_FLOW_ITEM_TYPE_ECPRI:
                        /* Capacity will be checked in the translate stage. */
                        ret = mlx5_flow_validate_item_ecpri(items, item_flags,
                                                            last_item,
                                                            ether_type,
                                                            &nic_ecpri_mask,
                                                            error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_ECPRI;
                        break;
                case RTE_FLOW_ITEM_TYPE_INTEGRITY:
                        ret = flow_dv_validate_item_integrity(dev, items,
                                                              item_flags,
                                                              &last_item,
                                                              integrity_items,
                                                              error);
                        if (ret < 0)
                                return ret;
                        break;
                case RTE_FLOW_ITEM_TYPE_CONNTRACK:
                        ret = flow_dv_validate_item_aso_ct(dev, items,
                                                           &item_flags, error);
                        if (ret < 0)
                                return ret;
                        break;
                case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
                        /*
                         * The tunnel offload item was processed before;
                         * list it here as a supported type.
                         */
                        break;
                case RTE_FLOW_ITEM_TYPE_FLEX:
                        ret = flow_dv_validate_item_flex(dev, items, item_flags,
                                                         &last_item,
                                                         tunnel != 0, error);
                        if (ret < 0)
                                return ret;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
                item_flags |= last_item;
        }
        if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
                ret = flow_dv_validate_item_integrity_post(integrity_items,
                                                           item_flags, error);
                if (ret)
                        return ret;
        }
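        /*
         * Integrity is validated in two passes: per item in the loop above,
         * which records the inner/outer items in integrity_items[], and the
         * post pass here once all layers are known. An illustrative (not
         * tested) item an application might pass to reach this code:
         *
         *   struct rte_flow_item_integrity integ = {
         *           .level = 0,
         *           .l3_ok = 1,
         *           .l4_ok = 1,
         *   };
         */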
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                int type = actions->type;

                if (!mlx5_flow_os_action_supported(type))
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions, "too many actions");
                if (action_flags &
                        MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
                        return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "meter action with policy "
                                "must be the last action");
                switch (type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_PORT_ID:
                case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
                        ret = flow_dv_validate_action_port_id(dev,
                                                              action_flags,
                                                              actions,
                                                              attr,
                                                              error);
                        if (ret)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        ret = flow_dv_validate_action_flag(dev, action_flags,
                                                           attr, error);
                        if (ret < 0)
                                return ret;
                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
                                /* Count all modify-header actions as one. */
                                if (!(action_flags &
                                      MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                        ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_FLAG |
                                                MLX5_FLOW_ACTION_MARK_EXT;
                                if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                        modify_after_mirror = 1;
                        } else {
                                action_flags |= MLX5_FLOW_ACTION_FLAG;
                                ++actions_n;
                        }
                        rw_act_num += MLX5_ACT_NUM_SET_MARK;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = flow_dv_validate_action_mark(dev, actions,
                                                           action_flags,
                                                           attr, error);
                        if (ret < 0)
                                return ret;
                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
                                /* Count all modify-header actions as one. */
                                if (!(action_flags &
                                      MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                        ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_MARK |
                                                MLX5_FLOW_ACTION_MARK_EXT;
                                if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                        modify_after_mirror = 1;
                        } else {
                                action_flags |= MLX5_FLOW_ACTION_MARK;
                                ++actions_n;
                        }
                        rw_act_num += MLX5_ACT_NUM_SET_MARK;
                        break;
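                /*
                 * In extended metadata mode (dv_xmeta_en != LEGACY), FLAG and
                 * MARK are implemented with a register write, i.e. as
                 * modify-header actions, hence the shared accounting above.
                 * Illustrative only: a MARK rule as an application would
                 * build it through the generic rte_flow API:
                 *
                 *   struct rte_flow_action_mark mark = { .id = 0xbeef };
                 *   struct rte_flow_action_queue queue = { .index = 0 };
                 *   struct rte_flow_action acts[] = {
                 *           { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                 *           { .type = RTE_FLOW_ACTION_TYPE_END },
                 *   };
                 */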
                case RTE_FLOW_ACTION_TYPE_SET_META:
                        ret = flow_dv_validate_action_set_meta(dev, actions,
                                                               action_flags,
                                                               attr, error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_SET_META;
                        rw_act_num += MLX5_ACT_NUM_SET_META;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TAG:
                        ret = flow_dv_validate_action_set_tag(dev, actions,
                                                              action_flags,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        tag_id = ((const struct rte_flow_action_set_tag *)
                                  actions->conf)->index;
                        action_flags |= MLX5_FLOW_ACTION_SET_TAG;
                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
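                /*
                 * tag_id is remembered here so that the SAMPLE validation
                 * below can reject the tag-then-sample corner case on tag
                 * index 0. A sketch of the configuration that sets it:
                 *
                 *   struct rte_flow_action_set_tag tag = {
                 *           .data = 0x1234,
                 *           .mask = 0xffffffff,
                 *           .index = 1,
                 *   };
                 */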
                case RTE_FLOW_ACTION_TYPE_DROP:
                        ret = mlx5_flow_validate_action_drop(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = mlx5_flow_validate_action_queue(actions,
                                                              action_flags, dev,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
                        queue_index = ((const struct rte_flow_action_queue *)
                                                        (actions->conf))->index;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        rss = actions->conf;
                        ret = mlx5_flow_validate_action_rss(actions,
                                                            action_flags, dev,
                                                            attr, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        if (rss && sample_rss &&
                            (sample_rss->level != rss->level ||
                            sample_rss->types != rss->types))
                                return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        NULL,
                                        "cannot use different RSS types "
                                        "or levels in the same flow");
                        if (rss != NULL && rss->queue_num)
                                queue_index = rss->queue[0];
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        ++actions_n;
                        break;
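                /*
                 * If the rule also carries a SAMPLE action with its own RSS,
                 * the check above compares only the hash types and the
                 * encapsulation level, so only the queue lists may differ.
                 * queue_index keeps the first queue for the hairpin checks
                 * after the loop. Illustrative conf:
                 *
                 *   uint16_t queues[] = { 0, 1, 2, 3 };
                 *   struct rte_flow_action_rss rss_conf = {
                 *           .types = RTE_ETH_RSS_IP,
                 *           .queue_num = 4,
                 *           .queue = queues,
                 *   };
                 */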
                case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
                        ret =
                        mlx5_flow_validate_action_default_miss(action_flags,
                                        attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
                        ++actions_n;
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
                        shared_count = true;
                        /* fall-through. */
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow_dv_validate_action_count(dev, shared_count,
                                                            action_flags,
                                                            attr, error);
                        if (ret < 0)
                                return ret;
                        count = actions->conf;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
                        if (flow_dv_validate_action_pop_vlan(dev,
                                                             action_flags,
                                                             actions,
                                                             item_flags, attr,
                                                             error))
                                return -rte_errno;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                        ret = flow_dv_validate_action_push_vlan(dev,
                                                                action_flags,
                                                                vlan_m,
                                                                actions, attr,
                                                                error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                        ret = flow_dv_validate_action_set_vlan_pcp
                                                (action_flags, actions, error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        /* Count PCP with push_vlan command. */
                        action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
                        ret = flow_dv_validate_action_set_vlan_vid
                                                (item_flags, action_flags,
                                                 actions, error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        /* Count VID with push_vlan command. */
                        action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
                        rw_act_num += MLX5_ACT_NUM_MDF_VID;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
                        ret = flow_dv_validate_action_l2_encap(dev,
                                                               action_flags,
                                                               actions, attr,
                                                               error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
                        ret = flow_dv_validate_action_decap(dev, action_flags,
                                                            actions, item_flags,
                                                            attr, error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_DECAP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        ret = flow_dv_validate_action_raw_encap_decap
                                (dev, NULL, actions->conf, attr, &action_flags,
                                 &actions_n, actions, item_flags, error);
                        if (ret < 0)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
                        decap = actions->conf;
                        while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
                                ;
                        if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
                                encap = NULL;
                                actions--;
                        } else {
                                encap = actions->conf;
                        }
                        ret = flow_dv_validate_action_raw_encap_decap
                                           (dev,
                                            decap ? decap : &empty_decap, encap,
                                            attr, &action_flags, &actions_n,
                                            actions, item_flags, error);
                        if (ret < 0)
                                return ret;
                        if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
                            (action_flags & MLX5_FLOW_ACTION_DECAP))
                                modify_after_mirror = 1;
                        break;
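                /*
                 * RAW_DECAP looks ahead (skipping VOIDs) for an adjacent
                 * RAW_ENCAP: the pair is validated as a single decap-then-
                 * encap header rewrite. Illustrative testpmd sketch of such
                 * a pair, with buffer indexes chosen for the example:
                 *
                 *   set raw_decap 0 eth / ipv4 / udp / vxlan / end_set
                 *   set raw_encap 1 eth src is 11:22:33:44:55:66 / end_set
                 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan / end
                 *        actions raw_decap index 0 / raw_encap index 1 /
                 *        queue index 0 / end
                 */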
                case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
                        ret = flow_dv_validate_action_modify_mac(action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
                                                MLX5_FLOW_ACTION_SET_MAC_SRC :
                                                MLX5_FLOW_ACTION_SET_MAC_DST;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        /*
                         * Even if the source and destination MAC addresses
                         * overlap in the header with 4-byte alignment, the
                         * convert function handles them separately, so four
                         * SW actions are created. Two actions are added each
                         * time, no matter how many bytes of the address are
                         * actually set.
                         */
                        rw_act_num += MLX5_ACT_NUM_MDF_MAC;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                        ret = flow_dv_validate_action_modify_ipv4(action_flags,
                                                                  actions,
                                                                  item_flags,
                                                                  error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
                                                MLX5_FLOW_ACTION_SET_IPV4_SRC :
                                                MLX5_FLOW_ACTION_SET_IPV4_DST;
                        rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
                        ret = flow_dv_validate_action_modify_ipv6(action_flags,
                                                                  actions,
                                                                  item_flags,
                                                                  error);
                        if (ret < 0)
                                return ret;
                        if (item_ipv6_proto == IPPROTO_ICMPV6)
                                return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions,
                                        "cannot change header "
                                        "with ICMPv6 proto");
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
                                                MLX5_FLOW_ACTION_SET_IPV6_SRC :
                                                MLX5_FLOW_ACTION_SET_IPV6_DST;
                        rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
                        ret = flow_dv_validate_action_modify_tp(action_flags,
                                                                actions,
                                                                item_flags,
                                                                error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
                                                MLX5_FLOW_ACTION_SET_TP_SRC :
                                                MLX5_FLOW_ACTION_SET_TP_DST;
                        rw_act_num += MLX5_ACT_NUM_MDF_PORT;
                        break;
                case RTE_FLOW_ACTION_TYPE_DEC_TTL:
                case RTE_FLOW_ACTION_TYPE_SET_TTL:
                        ret = flow_dv_validate_action_modify_ttl(action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_TTL ?
                                                MLX5_FLOW_ACTION_SET_TTL :
                                                MLX5_FLOW_ACTION_DEC_TTL;
                        rw_act_num += MLX5_ACT_NUM_MDF_TTL;
                        break;
                case RTE_FLOW_ACTION_TYPE_JUMP:
                        ret = flow_dv_validate_action_jump(dev, tunnel, actions,
                                                           action_flags,
                                                           attr, external,
                                                           error);
                        if (ret)
                                return ret;
                        if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
                            fdb_mirror_limit)
                                return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "sample and jump action combination is not supported");
                        ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_JUMP;
                        break;
                case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
                case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
                        ret = flow_dv_validate_action_modify_tcp_seq
                                                                (action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
                                                MLX5_FLOW_ACTION_INC_TCP_SEQ :
                                                MLX5_FLOW_ACTION_DEC_TCP_SEQ;
                        rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
                        break;
                case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
                case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
                        ret = flow_dv_validate_action_modify_tcp_ack
                                                                (action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
                                                MLX5_FLOW_ACTION_INC_TCP_ACK :
                                                MLX5_FLOW_ACTION_DEC_TCP_ACK;
                        rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
                case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_METER:
                        ret = mlx5_flow_validate_action_meter(dev,
                                                              action_flags,
                                                              item_flags,
                                                              actions, attr,
                                                              port_id_item,
                                                              &def_policy,
                                                              error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_METER;
                        if (!def_policy)
                                action_flags |=
                                MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
                        ++actions_n;
                        /* Meter action will add one more TAG action. */
                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
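                /*
                 * A meter with a non-default (termination) policy must be the
                 * last action; the check at the top of this loop enforces
                 * that on the following iteration. The extra TAG accounted
                 * here carries the flow ID across the meter tables.
                 * Illustrative testpmd ordering:
                 *
                 *   flow create 0 ingress pattern eth / end
                 *        actions count / meter mtr_id 1 / end
                 */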
                case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
                        if (!attr->transfer && !attr->group)
                                return rte_flow_error_set(error, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                                NULL,
                          "Shared ASO age action is not supported for group 0");
                        if (action_flags & MLX5_FLOW_ACTION_AGE)
                                return rte_flow_error_set
                                                  (error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   NULL,
                                                   "duplicate age actions set");
                        action_flags |= MLX5_FLOW_ACTION_AGE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_AGE:
                        non_shared_age = actions->conf;
                        ret = flow_dv_validate_action_age(action_flags,
                                                          actions, dev,
                                                          error);
                        if (ret < 0)
                                return ret;
                        /*
                         * Validate that the regular AGE action (using a
                         * counter) is mutually exclusive with indirect
                         * counter actions.
                         */
                        if (!flow_hit_aso_supported(priv->sh, attr)) {
                                if (shared_count)
                                        return rte_flow_error_set
                                                (error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                NULL,
                                                "old age and indirect count combination is not supported");
                                if (sample_count)
                                        return rte_flow_error_set
                                                (error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                NULL,
                                                "old age action and count must be in the same sub flow");
                        }
                        action_flags |= MLX5_FLOW_ACTION_AGE;
                        ++actions_n;
                        break;
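                /*
                 * When the flow-hit ASO object cannot be used (e.g. on the
                 * root table), AGE falls back to polling a flow counter,
                 * which is why it must then own that counter exclusively.
                 * Illustrative conf for the non-shared case:
                 *
                 *   struct rte_flow_action_age age = { .timeout = 30 };
                 */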
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
                        ret = flow_dv_validate_action_modify_ipv4_dscp
                                                         (action_flags,
                                                          actions,
                                                          item_flags,
                                                          error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
                        rw_act_num += MLX5_ACT_NUM_SET_DSCP;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
                        ret = flow_dv_validate_action_modify_ipv6_dscp
                                                                (action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
                        rw_act_num += MLX5_ACT_NUM_SET_DSCP;
                        break;
                case RTE_FLOW_ACTION_TYPE_SAMPLE:
                        ret = flow_dv_validate_action_sample(&action_flags,
                                                             actions, dev,
                                                             attr, item_flags,
                                                             rss, &sample_rss,
                                                             &sample_count,
                                                             &fdb_mirror_limit,
                                                             error);
                        if (ret < 0)
                                return ret;
                        if ((action_flags & MLX5_FLOW_ACTION_SET_TAG) &&
                            tag_id == 0 && priv->mtr_color_reg == REG_NON)
                                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "sample after tag action causes metadata tag index 0 corruption");
                        action_flags |= MLX5_FLOW_ACTION_SAMPLE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
                        ret = flow_dv_validate_action_modify_field(dev,
                                                                   action_flags,
                                                                   actions,
                                                                   attr,
                                                                   error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
                                modify_after_mirror = 1;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
                        rw_act_num += ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_CONNTRACK:
                        ret = flow_dv_validate_action_aso_ct(dev, action_flags,
                                                             item_flags, attr,
                                                             error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_CT;
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
                        /*
                         * The tunnel offload action was processed before;
                         * list it here as a supported type.
                         */
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        /*
         * Validate actions in flow rules:
         * - Explicit decap action is prohibited by the tunnel offload API.
         * - Drop action in a tunnel steer rule is prohibited by the API.
         * - The application cannot use the MARK action because its value can
         *   mask the tunnel default miss notification.
         * - JUMP in a tunnel match rule is not supported by the current PMD
         *   implementation.
         * - TAG & META are reserved for future use.
         */
        if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
                uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
                                            MLX5_FLOW_ACTION_MARK     |
                                            MLX5_FLOW_ACTION_SET_TAG  |
                                            MLX5_FLOW_ACTION_SET_META |
                                            MLX5_FLOW_ACTION_DROP;

                if (action_flags & bad_actions_mask)
                        return rte_flow_error_set
                                        (error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Invalid RTE action in tunnel "
                                        "set decap rule");
                if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
                        return rte_flow_error_set
                                        (error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "tunnel set decap rule must terminate "
                                        "with JUMP");
                if (!attr->ingress)
                        return rte_flow_error_set
                                        (error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "tunnel flows for ingress traffic only");
        }
        if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
                uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
                                            MLX5_FLOW_ACTION_MARK    |
                                            MLX5_FLOW_ACTION_SET_TAG |
                                            MLX5_FLOW_ACTION_SET_META;

                if (action_flags & bad_actions_mask)
                        return rte_flow_error_set
                                        (error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Invalid RTE action in tunnel "
                                        "set match rule");
        }
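        /*
         * Illustrative testpmd sketch of the tunnel offload rule pair these
         * checks constrain (tunnel id and group numbers are example values):
         *
         *   flow tunnel create 0 type vxlan
         *   flow create 0 ingress group 0 tunnel_set 1
         *        pattern eth / ipv4 / udp / vxlan / end
         *        actions jump group 1 / end
         *   flow create 0 ingress group 1 tunnel_match 1
         *        pattern eth / ipv4 / udp / vxlan / end
         *        actions queue index 0 / end
         */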
        /*
         * Validate the drop action mutual exclusion with other actions.
         * Drop action is mutually-exclusive with any other action, except for
         * Count action.
         * Drop action compatibility with tunnel offload was already validated.
         */
        if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
                            MLX5_FLOW_ACTION_TUNNEL_MATCH));
        else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
            (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "Drop action is mutually-exclusive "
                                          "with any other action, except for "
                                          "Count action");
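        /*
         * Illustrative only: the single combination the check above still
         * allows, built through the generic rte_flow API:
         *
         *   struct rte_flow_action drop_count[] = {
         *           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
         *           { .type = RTE_FLOW_ACTION_TYPE_DROP },
         *           { .type = RTE_FLOW_ACTION_TYPE_END },
         *   };
         */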
        /* E-Switch has a few restrictions on using items and actions. */
        if (attr->transfer) {
                if (!mlx5_flow_ext_mreg_supported(dev) &&
                    action_flags & MLX5_FLOW_ACTION_FLAG)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action FLAG");
                if (!mlx5_flow_ext_mreg_supported(dev) &&
                    action_flags & MLX5_FLOW_ACTION_MARK)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action MARK");
                if (action_flags & MLX5_FLOW_ACTION_QUEUE)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action QUEUE");
                if (action_flags & MLX5_FLOW_ACTION_RSS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action RSS");
                if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "no fate action is found");
        } else {
                if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "no fate action is found");
        }
        /*
         * Continue validation for Xcap and VLAN actions.
         * If hairpin is working in explicit TX rule mode, there is no action
         * splitting and the validation of hairpin ingress flows is the same
         * as for other standard flows.
         */
        if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
                             MLX5_FLOW_VLAN_ACTIONS)) &&
            (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index) ||
             ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
             conf->tx_explicit != 0))) {
                if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
                    MLX5_FLOW_XCAP_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "encap and decap "
                                                  "combination is not supported");
                /* Push VLAN is not supported in ingress except for NICs newer than CX5. */
                if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
                        struct mlx5_dev_ctx_shared *sh = priv->sh;
                        bool direction_error = false;

                        if (attr->transfer) {
                                bool fdb_tx = priv->representor_id != UINT16_MAX;
                                bool is_cx5 = sh->steering_format_version ==
                                    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;

                                if (!fdb_tx && is_cx5)
                                        direction_error = true;
                        } else if (attr->ingress) {
                                direction_error = true;
                        }
                        if (direction_error)
                                return rte_flow_error_set(error, ENOTSUP,
                                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                                          NULL,
                                                          "push VLAN action not supported "
                                                          "for ingress");
                }
                if (!attr->transfer && attr->ingress) {
                        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
                                return rte_flow_error_set
                                                (error, ENOTSUP,
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                                 NULL, "encap is not supported"
                                                 " for ingress traffic");
                        else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
                                        MLX5_FLOW_VLAN_ACTIONS)
                                return rte_flow_error_set
                                                (error, ENOTSUP,
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                                 NULL, "no support for "
                                                 "multiple VLAN actions");
                }
        }
        if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
                if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
                        ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
                        attr->ingress)
                        return rte_flow_error_set
                                (error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                NULL, "fate action not supported for "
                                "meter with policy");
                if (attr->egress) {
                        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                                return rte_flow_error_set
                                        (error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        NULL, "modify header action in egress "
                                        "cannot be done before meter action");
                        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
                                return rte_flow_error_set
                                        (error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        NULL, "encap action in egress "
                                        "cannot be done before meter action");
                        if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
                                return rte_flow_error_set
                                        (error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        NULL, "push vlan action in egress "
                                        "cannot be done before meter action");
                }
        }
        /*
         * Only one ASO action is supported in a single flow rule.
         * Non-shared AGE + counter falls back to the HW counter; no ASO hit
         * object is used then.
         * Group 0 uses the HW counter for AGE too, even without a counter
         * action.
         */
        aso_mask = (action_flags & MLX5_FLOW_ACTION_METER && priv->sh->meter_aso_en) << 2 |
                   (action_flags & MLX5_FLOW_ACTION_CT && priv->sh->ct_aso_en) << 1 |
                   (action_flags & MLX5_FLOW_ACTION_AGE &&
                    !(non_shared_age && count) &&
                    (attr->group || (attr->transfer && priv->fdb_def_rule)) &&
                    priv->sh->flow_hit_aso_en);
        if (__builtin_popcountl(aso_mask) > 1)
                return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "combining AGE, METER and CT ASO actions in a single rule is not supported");
8075         /*
8076          * A hairpin flow adds one more TAG action in TX implicit mode.
8077          * In TX explicit mode, there is no hairpin flow ID.
8078          */
8079         if (hairpin > 0)
8080                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8081         /* Extra metadata enabled: one more TAG action will be added. */
8082         if (dev_conf->dv_flow_en &&
8083             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
8084             mlx5_flow_ext_mreg_supported(dev))
8085                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8086         if (rw_act_num >
8087                         flow_dv_modify_hdr_action_max(dev, is_root)) {
8088                 return rte_flow_error_set(error, ENOTSUP,
8089                                           RTE_FLOW_ERROR_TYPE_ACTION,
8090                                           NULL, "too many header modify"
8091                                           " actions");
8092         }
8093         /* E-Switch egress mirror and modify flow has a limitation on CX5. */
8094         if (fdb_mirror_limit && modify_after_mirror)
8095                 return rte_flow_error_set(error, EINVAL,
8096                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8097                                 "sample before modify action is not supported");
8098         /*
8099          * Validate the NIC egress flow on a representor. Except for the
8100          * implicit hairpin default egress flow with the TX_QUEUE item,
8101          * other flows do not work due to metadata regC0 mismatch.
8102          */
8103         if ((!attr->transfer && attr->egress) && priv->representor &&
8104             !(item_flags & MLX5_FLOW_ITEM_TX_QUEUE))
8105                 return rte_flow_error_set(error, EINVAL,
8106                                           RTE_FLOW_ERROR_TYPE_ITEM,
8107                                           NULL,
8108                                           "NIC egress rules on representors"
8109                                           " are not supported");
8110         return 0;
8111 }
8112
8113 /**
8114  * Internal preparation function. Allocates the DV flow;
8115  * its size is constant.
8116  *
8117  * @param[in] dev
8118  *   Pointer to the rte_eth_dev structure.
8119  * @param[in] attr
8120  *   Pointer to the flow attributes.
8121  * @param[in] items
8122  *   Pointer to the list of items.
8123  * @param[in] actions
8124  *   Pointer to the list of actions.
8125  * @param[out] error
8126  *   Pointer to the error structure.
8127  *
8128  * @return
8129  *   Pointer to mlx5_flow object on success,
8130  *   otherwise NULL and rte_errno is set.
8131  */
8132 static struct mlx5_flow *
8133 flow_dv_prepare(struct rte_eth_dev *dev,
8134                 const struct rte_flow_attr *attr __rte_unused,
8135                 const struct rte_flow_item items[] __rte_unused,
8136                 const struct rte_flow_action actions[] __rte_unused,
8137                 struct rte_flow_error *error)
8138 {
8139         uint32_t handle_idx = 0;
8140         struct mlx5_flow *dev_flow;
8141         struct mlx5_flow_handle *dev_handle;
8142         struct mlx5_priv *priv = dev->data->dev_private;
8143         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8144
8145         MLX5_ASSERT(wks);
8146         wks->skip_matcher_reg = 0;
8147         wks->policy = NULL;
8148         wks->final_policy = NULL;
8149         /* Guard against overflowing the temporary device flow array. */
8150         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8151                 rte_flow_error_set(error, ENOSPC,
8152                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8153                                    "no free temporary device flow");
8154                 return NULL;
8155         }
8156         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8157                                    &handle_idx);
8158         if (!dev_handle) {
8159                 rte_flow_error_set(error, ENOMEM,
8160                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8161                                    "not enough memory to create flow handle");
8162                 return NULL;
8163         }
8164         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8165         dev_flow = &wks->flows[wks->flow_idx++];
8166         memset(dev_flow, 0, sizeof(*dev_flow));
8167         dev_flow->handle = dev_handle;
8168         dev_flow->handle_idx = handle_idx;
8169         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8170         dev_flow->ingress = attr->ingress;
8171         dev_flow->dv.transfer = attr->transfer;
8172         return dev_flow;
8173 }
8174
8175 #ifdef RTE_LIBRTE_MLX5_DEBUG
8176 /**
8177  * Sanity check for match mask and value. Similar to check_valid_spec() in
8178  * the kernel driver. If an unmasked bit is present in the value, it returns failure.
8179  *
8180  * @param match_mask
8181  *   Pointer to the match mask buffer.
8182  * @param match_value
8183  *   Pointer to the match value buffer.
8184  *
8185  * @return
8186  *   0 if valid, -EINVAL otherwise.
8187  */
8188 static int
8189 flow_dv_check_valid_spec(void *match_mask, void *match_value)
8190 {
8191         uint8_t *m = match_mask;
8192         uint8_t *v = match_value;
8193         unsigned int i;
8194
8195         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8196                 if (v[i] & ~m[i]) {
8197                         DRV_LOG(ERR,
8198                                 "match_value differs from match_criteria"
8199                                 " %p[%u] != %p[%u]",
8200                                 match_value, i, match_mask, i);
8201                         return -EINVAL;
8202                 }
8203         }
8204         return 0;
8205 }
8206 #endif
8207
8208 /**
8209  * Add match of ip_version.
8210  *
8211  * @param[in] group
8212  *   Flow group.
8213  * @param[in] headers_v
8214  *   Values header pointer.
8215  * @param[in] headers_m
8216  *   Masks header pointer.
8217  * @param[in] ip_version
8218  *   The IP version to set.
8219  */
8220 static inline void
8221 flow_dv_set_match_ip_version(uint32_t group,
8222                              void *headers_v,
8223                              void *headers_m,
8224                              uint8_t ip_version)
8225 {
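        /*
         * On the root table (group 0) the full 4-bit ip_version mask is used;
         * on other tables the version value itself serves as the mask.
         * The ethertype match is cleared since ip_version supersedes it.
         */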
8226         if (group == 0)
8227                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8228         else
8229                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8230                          ip_version);
8231         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8232         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8233         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8234 }
8235
8236 /**
8237  * Add Ethernet item to matcher and to the value.
8238  *
8239  * @param[in, out] matcher
8240  *   Flow matcher.
8241  * @param[in, out] key
8242  *   Flow matcher value.
8243  * @param[in] item
8244  *   Flow pattern to translate.
8245  * @param[in] inner
8246  *   Item is inner pattern.
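 * @param[in] group
 *   The group to insert the rule.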
8247  */
8248 static void
8249 flow_dv_translate_item_eth(void *matcher, void *key,
8250                            const struct rte_flow_item *item, int inner,
8251                            uint32_t group)
8252 {
8253         const struct rte_flow_item_eth *eth_m = item->mask;
8254         const struct rte_flow_item_eth *eth_v = item->spec;
8255         const struct rte_flow_item_eth nic_mask = {
8256                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8257                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8258                 .type = RTE_BE16(0xffff),
8259                 .has_vlan = 0,
8260         };
8261         void *hdrs_m;
8262         void *hdrs_v;
8263         char *l24_v;
8264         unsigned int i;
8265
8266         if (!eth_v)
8267                 return;
8268         if (!eth_m)
8269                 eth_m = &nic_mask;
8270         if (inner) {
8271                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8272                                          inner_headers);
8273                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8274         } else {
8275                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8276                                          outer_headers);
8277                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8278         }
8279         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8280                &eth_m->dst, sizeof(eth_m->dst));
8281         /* The value must be in the range of the mask. */
8282         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8283         for (i = 0; i < sizeof(eth_m->dst); ++i)
8284                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8285         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8286                &eth_m->src, sizeof(eth_m->src));
8287         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8288         /* The value must be in the range of the mask. */
8289         for (i = 0; i < sizeof(eth_m->src); ++i)
8290                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8291         /*
8292          * HW supports match on one Ethertype, the Ethertype following the last
8293          * VLAN tag of the packet (see PRM).
8294          * Set match on ethertype only if ETH header is not followed by VLAN.
8295          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8296          * ethertype, and use ip_version field instead.
8297          * eCPRI over Ether layer will use type value 0xAEFE.
8298          */
8299         if (eth_m->type == 0xFFFF) {
8300                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8301                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8302                 switch (eth_v->type) {
8303                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8304                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8305                         return;
8306                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8307                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8308                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8309                         return;
8310                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8311                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8312                         return;
8313                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8314                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8315                         return;
8316                 default:
8317                         break;
8318                 }
8319         }
8320         if (eth_m->has_vlan) {
8321                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8322                 if (eth_v->has_vlan) {
8323                         /*
8324                          * Here, when also has_more_vlan field in VLAN item is
8325                          * not set, only single-tagged packets will be matched.
8326                          */
8327                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8328                         return;
8329                 }
8330         }
8331         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8332                  rte_be_to_cpu_16(eth_m->type));
8333         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8334         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8335 }
8336
8337 /**
8338  * Add VLAN item to matcher and to the value.
8339  *
8340  * @param[in, out] dev_flow
8341  *   Flow descriptor.
8342  * @param[in, out] matcher
8343  *   Flow matcher.
8344  * @param[in, out] key
8345  *   Flow matcher value.
8346  * @param[in] item
8347  *   Flow pattern to translate.
8348  * @param[in] inner
8349  *   Item is inner pattern.
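 * @param[in] group
 *   The group to insert the rule.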
8350  */
8351 static void
8352 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8353                             void *matcher, void *key,
8354                             const struct rte_flow_item *item,
8355                             int inner, uint32_t group)
8356 {
8357         const struct rte_flow_item_vlan *vlan_m = item->mask;
8358         const struct rte_flow_item_vlan *vlan_v = item->spec;
8359         void *hdrs_m;
8360         void *hdrs_v;
8361         uint16_t tci_m;
8362         uint16_t tci_v;
8363
8364         if (inner) {
8365                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8366                                          inner_headers);
8367                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8368         } else {
8369                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8370                                          outer_headers);
8371                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8372                 /*
8373                  * This is a workaround; masks are not supported
8374                  * and have been pre-validated.
8375                  */
8376                 if (vlan_v)
8377                         dev_flow->handle->vf_vlan.tag =
8378                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8379         }
8380         /*
8381          * When a VLAN item exists in the flow, mark the packet as tagged,
8382          * even if the TCI is not specified.
8383          */
8384         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8385                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8386                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8387         }
8388         if (!vlan_v)
8389                 return;
8390         if (!vlan_m)
8391                 vlan_m = &rte_flow_item_vlan_mask;
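        /* VLAN TCI layout: PCP in bits 15-13, DEI/CFI in bit 12, VID in bits 11-0. */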
8392         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8393         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8394         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8395         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8396         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8397         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8398         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8399         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8400         /*
8401          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8402          * ethertype, and use ip_version field instead.
8403          */
8404         if (vlan_m->inner_type == 0xFFFF) {
8405                 switch (vlan_v->inner_type) {
8406                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8407                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8408                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8409                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8410                         return;
8411                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8412                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8413                         return;
8414                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8415                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8416                         return;
8417                 default:
8418                         break;
8419                 }
8420         }
8421         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8422                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8423                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8424                 /* Only one vlan_tag bit can be set. */
8425                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8426                 return;
8427         }
8428         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8429                  rte_be_to_cpu_16(vlan_m->inner_type));
8430         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8431                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8432 }
8433
8434 /**
8435  * Add IPV4 item to matcher and to the value.
8436  *
8437  * @param[in, out] matcher
8438  *   Flow matcher.
8439  * @param[in, out] key
8440  *   Flow matcher value.
8441  * @param[in] item
8442  *   Flow pattern to translate.
8443  * @param[in] inner
8444  *   Item is inner pattern.
8445  * @param[in] group
8446  *   The group to insert the rule.
8447  */
8448 static void
8449 flow_dv_translate_item_ipv4(void *matcher, void *key,
8450                             const struct rte_flow_item *item,
8451                             int inner, uint32_t group)
8452 {
8453         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8454         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8455         const struct rte_flow_item_ipv4 nic_mask = {
8456                 .hdr = {
8457                         .src_addr = RTE_BE32(0xffffffff),
8458                         .dst_addr = RTE_BE32(0xffffffff),
8459                         .type_of_service = 0xff,
8460                         .next_proto_id = 0xff,
8461                         .time_to_live = 0xff,
8462                 },
8463         };
8464         void *headers_m;
8465         void *headers_v;
8466         char *l24_m;
8467         char *l24_v;
8468         uint8_t tos, ihl_m, ihl_v;
8469
8470         if (inner) {
8471                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8472                                          inner_headers);
8473                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8474         } else {
8475                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8476                                          outer_headers);
8477                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8478         }
8479         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8480         if (!ipv4_v)
8481                 return;
8482         if (!ipv4_m)
8483                 ipv4_m = &nic_mask;
8484         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8485                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8486         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8487                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8488         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8489         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8490         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8491                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8492         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8493                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8494         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8495         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8496         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8497         ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8498         ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8499         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8500         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
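        /* ToS byte layout: DSCP in bits 7-2, ECN in bits 1-0. */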
8501         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8502                  ipv4_m->hdr.type_of_service);
8503         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8504         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8505                  ipv4_m->hdr.type_of_service >> 2);
8506         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8507         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8508                  ipv4_m->hdr.next_proto_id);
8509         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8510                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8511         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8512                  ipv4_m->hdr.time_to_live);
8513         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8514                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
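        /* Match the frag bit as a boolean: any fragment_offset bit marks a fragment. */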
8515         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8516                  !!(ipv4_m->hdr.fragment_offset));
8517         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8518                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8519 }
8520
8521 /**
8522  * Add IPV6 item to matcher and to the value.
8523  *
8524  * @param[in, out] matcher
8525  *   Flow matcher.
8526  * @param[in, out] key
8527  *   Flow matcher value.
8528  * @param[in] item
8529  *   Flow pattern to translate.
8530  * @param[in] inner
8531  *   Item is inner pattern.
8532  * @param[in] group
8533  *   The group to insert the rule.
8534  */
8535 static void
8536 flow_dv_translate_item_ipv6(void *matcher, void *key,
8537                             const struct rte_flow_item *item,
8538                             int inner, uint32_t group)
8539 {
8540         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8541         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8542         const struct rte_flow_item_ipv6 nic_mask = {
8543                 .hdr = {
8544                         .src_addr =
8545                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8546                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8547                         .dst_addr =
8548                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8549                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8550                         .vtc_flow = RTE_BE32(0xffffffff),
8551                         .proto = 0xff,
8552                         .hop_limits = 0xff,
8553                 },
8554         };
8555         void *headers_m;
8556         void *headers_v;
8557         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8558         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8559         char *l24_m;
8560         char *l24_v;
8561         uint32_t vtc_m;
8562         uint32_t vtc_v;
8563         int i;
8564         int size;
8565
8566         if (inner) {
8567                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8568                                          inner_headers);
8569                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8570         } else {
8571                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8572                                          outer_headers);
8573                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8574         }
8575         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8576         if (!ipv6_v)
8577                 return;
8578         if (!ipv6_m)
8579                 ipv6_m = &nic_mask;
8580         size = sizeof(ipv6_m->hdr.dst_addr);
8581         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8582                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8583         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8584                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8585         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8586         for (i = 0; i < size; ++i)
8587                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8588         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8589                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8590         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8591                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8592         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8593         for (i = 0; i < size; ++i)
8594                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8595         /* TOS. */
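        /*
         * vtc_flow layout: version in bits 31-28, traffic class in bits 27-20
         * (DSCP in its upper six bits, ECN in its lower two), flow label in
         * bits 19-0.
         */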
8596         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8597         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8598         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8599         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8600         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8601         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8602         /* Label. */
8603         if (inner) {
8604                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8605                          vtc_m);
8606                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8607                          vtc_v);
8608         } else {
8609                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8610                          vtc_m);
8611                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8612                          vtc_v);
8613         }
8614         /* Protocol. */
8615         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8616                  ipv6_m->hdr.proto);
8617         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8618                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8619         /* Hop limit. */
8620         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8621                  ipv6_m->hdr.hop_limits);
8622         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8623                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8624         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8625                  !!(ipv6_m->has_frag_ext));
8626         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8627                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8628 }
8629
8630 /**
8631  * Add IPV6 fragment extension item to matcher and to the value.
8632  *
8633  * @param[in, out] matcher
8634  *   Flow matcher.
8635  * @param[in, out] key
8636  *   Flow matcher value.
8637  * @param[in] item
8638  *   Flow pattern to translate.
8639  * @param[in] inner
8640  *   Item is inner pattern.
8641  */
8642 static void
8643 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8644                                      const struct rte_flow_item *item,
8645                                      int inner)
8646 {
8647         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8648         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8649         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8650                 .hdr = {
8651                         .next_header = 0xff,
8652                         .frag_data = RTE_BE16(0xffff),
8653                 },
8654         };
8655         void *headers_m;
8656         void *headers_v;
8657
8658         if (inner) {
8659                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8660                                          inner_headers);
8661                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8662         } else {
8663                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8664                                          outer_headers);
8665                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8666         }
8667         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8668         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8669         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8670         if (!ipv6_frag_ext_v)
8671                 return;
8672         if (!ipv6_frag_ext_m)
8673                 ipv6_frag_ext_m = &nic_mask;
8674         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8675                  ipv6_frag_ext_m->hdr.next_header);
8676         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8677                  ipv6_frag_ext_v->hdr.next_header &
8678                  ipv6_frag_ext_m->hdr.next_header);
8679 }
8680
8681 /**
8682  * Add TCP item to matcher and to the value.
8683  *
8684  * @param[in, out] matcher
8685  *   Flow matcher.
8686  * @param[in, out] key
8687  *   Flow matcher value.
8688  * @param[in] item
8689  *   Flow pattern to translate.
8690  * @param[in] inner
8691  *   Item is inner pattern.
8692  */
8693 static void
8694 flow_dv_translate_item_tcp(void *matcher, void *key,
8695                            const struct rte_flow_item *item,
8696                            int inner)
8697 {
8698         const struct rte_flow_item_tcp *tcp_m = item->mask;
8699         const struct rte_flow_item_tcp *tcp_v = item->spec;
8700         void *headers_m;
8701         void *headers_v;
8702
8703         if (inner) {
8704                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8705                                          inner_headers);
8706                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8707         } else {
8708                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8709                                          outer_headers);
8710                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8711         }
8712         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8713         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8714         if (!tcp_v)
8715                 return;
8716         if (!tcp_m)
8717                 tcp_m = &rte_flow_item_tcp_mask;
8718         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8719                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8720         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8721                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8722         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8723                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8724         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8725                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8726         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8727                  tcp_m->hdr.tcp_flags);
8728         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8729                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8730 }
8731
8732 /**
8733  * Add UDP item to matcher and to the value.
8734  *
8735  * @param[in, out] matcher
8736  *   Flow matcher.
8737  * @param[in, out] key
8738  *   Flow matcher value.
8739  * @param[in] item
8740  *   Flow pattern to translate.
8741  * @param[in] inner
8742  *   Item is inner pattern.
8743  */
8744 static void
8745 flow_dv_translate_item_udp(void *matcher, void *key,
8746                            const struct rte_flow_item *item,
8747                            int inner)
8748 {
8749         const struct rte_flow_item_udp *udp_m = item->mask;
8750         const struct rte_flow_item_udp *udp_v = item->spec;
8751         void *headers_m;
8752         void *headers_v;
8753
8754         if (inner) {
8755                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8756                                          inner_headers);
8757                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8758         } else {
8759                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8760                                          outer_headers);
8761                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8762         }
8763         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8764         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8765         if (!udp_v)
8766                 return;
8767         if (!udp_m)
8768                 udp_m = &rte_flow_item_udp_mask;
8769         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8770                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8771         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8772                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8773         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8774                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8775         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8776                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8777 }
8778
8779 /**
8780  * Add GRE optional Key item to matcher and to the value.
8781  *
8782  * @param[in, out] matcher
8783  *   Flow matcher.
8784  * @param[in, out] key
8785  *   Flow matcher value.
8786  * @param[in] item
8787  *   Flow pattern to translate.
8790  */
8791 static void
8792 flow_dv_translate_item_gre_key(void *matcher, void *key,
8793                                    const struct rte_flow_item *item)
8794 {
8795         const rte_be32_t *key_m = item->mask;
8796         const rte_be32_t *key_v = item->spec;
8797         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8798         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8799         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8800
8801         /* GRE K bit must be on and should already be validated */
8802         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8803         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8804         if (!key_v)
8805                 return;
8806         if (!key_m)
8807                 key_m = &gre_key_default_mask;
8808         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8809                  rte_be_to_cpu_32(*key_m) >> 8);
8810         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8811                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8812         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8813                  rte_be_to_cpu_32(*key_m) & 0xFF);
8814         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8815                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8816 }
8817
8818 /**
8819  * Add GRE item to matcher and to the value.
8820  *
8821  * @param[in, out] matcher
8822  *   Flow matcher.
8823  * @param[in, out] key
8824  *   Flow matcher value.
8825  * @param[in] item
8826  *   Flow pattern to translate.
8827  * @param[in] pattern_flags
8828  *   Accumulated pattern flags.
8829  */
8830 static void
8831 flow_dv_translate_item_gre(void *matcher, void *key,
8832                            const struct rte_flow_item *item,
8833                            uint64_t pattern_flags)
8834 {
8835         static const struct rte_flow_item_gre empty_gre = {0,};
8836         const struct rte_flow_item_gre *gre_m = item->mask;
8837         const struct rte_flow_item_gre *gre_v = item->spec;
8838         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
8839         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8840         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8841         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
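        /*
         * Host-order view of the GRE c_rsvd0_ver word (RFC 2890):
         * C flag in bit 15, K flag in bit 13, S flag in bit 12,
         * version in bits 2-0.
         */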
8842         struct {
8843                 union {
8844                         __extension__
8845                         struct {
8846                                 uint16_t version:3;
8847                                 uint16_t rsvd0:9;
8848                                 uint16_t s_present:1;
8849                                 uint16_t k_present:1;
8850                                 uint16_t rsvd_bit1:1;
8851                                 uint16_t c_present:1;
8852                         };
8853                         uint16_t value;
8854                 };
8855         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8856         uint16_t protocol_m, protocol_v;
8857
8858         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8859         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8860         if (!gre_v) {
8861                 gre_v = &empty_gre;
8862                 gre_m = &empty_gre;
8863         } else {
8864                 if (!gre_m)
8865                         gre_m = &rte_flow_item_gre_mask;
8866         }
8867         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8868         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8869         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8870                  gre_crks_rsvd0_ver_m.c_present);
8871         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8872                  gre_crks_rsvd0_ver_v.c_present &
8873                  gre_crks_rsvd0_ver_m.c_present);
8874         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8875                  gre_crks_rsvd0_ver_m.k_present);
8876         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8877                  gre_crks_rsvd0_ver_v.k_present &
8878                  gre_crks_rsvd0_ver_m.k_present);
8879         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8880                  gre_crks_rsvd0_ver_m.s_present);
8881         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8882                  gre_crks_rsvd0_ver_v.s_present &
8883                  gre_crks_rsvd0_ver_m.s_present);
8884         protocol_m = rte_be_to_cpu_16(gre_m->protocol);
8885         protocol_v = rte_be_to_cpu_16(gre_v->protocol);
8886         if (!protocol_m) {
8887                 /* Force next protocol to prevent matcher duplication. */
8888                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
8889                 if (protocol_v)
8890                         protocol_m = 0xFFFF;
8891         }
8892         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
8893         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8894                  protocol_m & protocol_v);
8895 }
8896
8897 /**
8898  * Add GRE optional items to matcher and to the value.
8899  *
8900  * @param[in, out] matcher
8901  *   Flow matcher.
8902  * @param[in, out] key
8903  *   Flow matcher value.
8904  * @param[in] item
8905  *   Flow pattern to translate.
8906  * @param[in] gre_item
8907  *   Pointer to gre_item.
8908  * @param[in] pattern_flags
8909  *   Accumulated pattern flags.
8910  */
8911 static void
8912 flow_dv_translate_item_gre_option(void *matcher, void *key,
8913                                   const struct rte_flow_item *item,
8914                                   const struct rte_flow_item *gre_item,
8915                                   uint64_t pattern_flags)
8916 {
8917         const struct rte_flow_item_gre_opt *option_m = item->mask;
8918         const struct rte_flow_item_gre_opt *option_v = item->spec;
8919         const struct rte_flow_item_gre *gre_m = gre_item->mask;
8920         const struct rte_flow_item_gre *gre_v = gre_item->spec;
8921         static const struct rte_flow_item_gre empty_gre = {0};
8922         struct rte_flow_item gre_key_item;
8923         uint16_t c_rsvd0_ver_m, c_rsvd0_ver_v;
8924         uint16_t protocol_m, protocol_v;
8925         void *misc5_m;
8926         void *misc5_v;
8927
8928         /*
8929          * If only the key field is matched, keep using misc for matching.
8930          * If the checksum or sequence needs to be matched, use misc5;
8931          * misc is not needed then.
8932          */
8933         if (!(option_m->sequence.sequence ||
8934               option_m->checksum_rsvd.checksum)) {
8935                 flow_dv_translate_item_gre(matcher, key, gre_item,
8936                                            pattern_flags);
8937                 gre_key_item.spec = &option_v->key.key;
8938                 gre_key_item.mask = &option_m->key.key;
8939                 flow_dv_translate_item_gre_key(matcher, key, &gre_key_item);
8940                 return;
8941         }
8942         if (!gre_v) {
8943                 gre_v = &empty_gre;
8944                 gre_m = &empty_gre;
8945         } else {
8946                 if (!gre_m)
8947                         gre_m = &rte_flow_item_gre_mask;
8948         }
8949         protocol_v = gre_v->protocol;
8950         protocol_m = gre_m->protocol;
8951         if (!protocol_m) {
8952                 /* Force next protocol to prevent matcher duplication. */
8953                 uint16_t ether_type =
8954                         mlx5_translate_tunnel_etypes(pattern_flags);
8955                 if (ether_type) {
8956                         protocol_v = rte_be_to_cpu_16(ether_type);
8957                         protocol_m = UINT16_MAX;
8958                 }
8959         }
8960         c_rsvd0_ver_v = gre_v->c_rsvd0_ver;
8961         c_rsvd0_ver_m = gre_m->c_rsvd0_ver;
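        /* GRE flag bits in the big-endian flags word: C - 0x8000, K - 0x2000, S - 0x1000. */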
8962         if (option_m->sequence.sequence) {
8963                 c_rsvd0_ver_v |= RTE_BE16(0x1000);
8964                 c_rsvd0_ver_m |= RTE_BE16(0x1000);
8965         }
8966         if (option_m->key.key) {
8967                 c_rsvd0_ver_v |= RTE_BE16(0x2000);
8968                 c_rsvd0_ver_m |= RTE_BE16(0x2000);
8969         }
8970         if (option_m->checksum_rsvd.checksum) {
8971                 c_rsvd0_ver_v |= RTE_BE16(0x8000);
8972                 c_rsvd0_ver_m |= RTE_BE16(0x8000);
8973         }
8974         /*
8975          * Hardware parses the GRE optional fields into fixed locations,
8976          * so there is no need to adjust the tunnel dword indices.
8977          */
8978         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8979         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8980         MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_0,
8981                  rte_be_to_cpu_32((c_rsvd0_ver_v | protocol_v << 16) &
8982                                   (c_rsvd0_ver_m | protocol_m << 16)));
8983         MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_0,
8984                  rte_be_to_cpu_32(c_rsvd0_ver_m | protocol_m << 16));
8985         MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_1,
8986                  rte_be_to_cpu_32(option_v->checksum_rsvd.checksum &
8987                                   option_m->checksum_rsvd.checksum));
8988         MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_1,
8989                  rte_be_to_cpu_32(option_m->checksum_rsvd.checksum));
8990         MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_2,
8991                  rte_be_to_cpu_32(option_v->key.key & option_m->key.key));
8992         MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_2,
8993                  rte_be_to_cpu_32(option_m->key.key));
8994         MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_3,
8995                  rte_be_to_cpu_32(option_v->sequence.sequence &
8996                                   option_m->sequence.sequence));
8997         MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_3,
8998                  rte_be_to_cpu_32(option_m->sequence.sequence));
8999 }
9000
9001 /**
9002  * Add NVGRE item to matcher and to the value.
9003  *
9004  * @param[in, out] matcher
9005  *   Flow matcher.
9006  * @param[in, out] key
9007  *   Flow matcher value.
9008  * @param[in] item
9009  *   Flow pattern to translate.
9010  * @param[in] pattern_flags
9011  *   Accumulated pattern flags.
9012  */
9013 static void
9014 flow_dv_translate_item_nvgre(void *matcher, void *key,
9015                              const struct rte_flow_item *item,
9016                              unsigned long pattern_flags)
9017 {
9018         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
9019         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
9020         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9021         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9022         const char *tni_flow_id_m;
9023         const char *tni_flow_id_v;
9024         char *gre_key_m;
9025         char *gre_key_v;
9026         int size;
9027         int i;
9028
9029         /* For NVGRE, GRE header fields must be set with defined values. */
9030         const struct rte_flow_item_gre gre_spec = {
9031                 .c_rsvd0_ver = RTE_BE16(0x2000),
9032                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
9033         };
9034         const struct rte_flow_item_gre gre_mask = {
9035                 .c_rsvd0_ver = RTE_BE16(0xB000),
9036                 .protocol = RTE_BE16(UINT16_MAX),
9037         };
9038         const struct rte_flow_item gre_item = {
9039                 .spec = &gre_spec,
9040                 .mask = &gre_mask,
9041                 .last = NULL,
9042         };
9043         flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
9044         if (!nvgre_v)
9045                 return;
9046         if (!nvgre_m)
9047                 nvgre_m = &rte_flow_item_nvgre_mask;
9048         tni_flow_id_m = (const char *)nvgre_m->tni;
9049         tni_flow_id_v = (const char *)nvgre_v->tni;
9050         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
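        /* The 24-bit TNI plus the 8-bit flow_id fill the 32-bit GRE key field. */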
9051         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
9052         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
9053         memcpy(gre_key_m, tni_flow_id_m, size);
9054         for (i = 0; i < size; ++i)
9055                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
9056 }
9057
9058 /**
9059  * Add VXLAN item to matcher and to the value.
9060  *
9061  * @param[in] dev
9062  *   Pointer to the Ethernet device structure.
9063  * @param[in] attr
9064  *   Flow rule attributes.
9065  * @param[in, out] matcher
9066  *   Flow matcher.
9067  * @param[in, out] key
9068  *   Flow matcher value.
9069  * @param[in] item
9070  *   Flow pattern to translate.
9071  * @param[in] inner
9072  *   Item is inner pattern.
9073  */
9074 static void
9075 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
9076                              const struct rte_flow_attr *attr,
9077                              void *matcher, void *key,
9078                              const struct rte_flow_item *item,
9079                              int inner)
9080 {
9081         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
9082         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
9083         void *headers_m;
9084         void *headers_v;
9085         void *misc5_m;
9086         void *misc5_v;
9087         uint32_t *tunnel_header_v;
9088         uint32_t *tunnel_header_m;
9089         uint16_t dport;
9090         struct mlx5_priv *priv = dev->data->dev_private;
9091         const struct rte_flow_item_vxlan nic_mask = {
9092                 .vni = "\xff\xff\xff",
9093                 .rsvd1 = 0xff,
9094         };
9095
9096         if (inner) {
9097                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9098                                          inner_headers);
9099                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9100         } else {
9101                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9102                                          outer_headers);
9103                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9104         }
9105         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
9106                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
9107         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9108                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9109                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9110         }
9111         dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
9112         if (!vxlan_v)
9113                 return;
9114         if (!vxlan_m) {
9115                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
9116                     (attr->group && !priv->sh->misc5_cap))
9117                         vxlan_m = &rte_flow_item_vxlan_mask;
9118                 else
9119                         vxlan_m = &nic_mask;
9120         }
9121         if ((priv->sh->steering_format_version ==
9122             MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
9123             dport != MLX5_UDP_PORT_VXLAN) ||
9124             (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
9125             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
9126                 void *misc_m;
9127                 void *misc_v;
9128                 char *vni_m;
9129                 char *vni_v;
9130                 int size;
9131                 int i;
9132                 misc_m = MLX5_ADDR_OF(fte_match_param,
9133                                       matcher, misc_parameters);
9134                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9135                 size = sizeof(vxlan_m->vni);
9136                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
9137                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
9138                 memcpy(vni_m, vxlan_m->vni, size);
9139                 for (i = 0; i < size; ++i)
9140                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9141                 return;
9142         }
9143         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
9144         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
9145         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
9146                                                    misc5_v,
9147                                                    tunnel_header_1);
9148         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
9149                                                    misc5_m,
9150                                                    tunnel_header_1);
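        /* tunnel_header_1 carries the VXLAN VNI in bytes 0-2 and rsvd1 in byte 3. */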
9151         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
9152                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
9153                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
9154         if (*tunnel_header_v)
9155                 *tunnel_header_m = vxlan_m->vni[0] |
9156                         vxlan_m->vni[1] << 8 |
9157                         vxlan_m->vni[2] << 16;
9158         else
9159                 *tunnel_header_m = 0x0;
9160         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
9161         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
9162                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
9163 }
9164
9165 /**
9166  * Add VXLAN-GPE item to matcher and to the value.
9167  *
9168  * @param[in, out] matcher
9169  *   Flow matcher.
9170  * @param[in, out] key
9171  *   Flow matcher value.
9172  * @param[in] item
9173  *   Flow pattern to translate.
9174  * @param[in] pattern_flags
9175  *   Accumulated pattern flags.
9176  */
9177
9178 static void
9179 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
9180                                  const struct rte_flow_item *item,
9181                                  const uint64_t pattern_flags)
9182 {
9183         static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
9184         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
9185         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
9186         /* The item was validated to be on the outer side */
9187         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9188         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9189         void *misc_m =
9190                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
9191         void *misc_v =
9192                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9193         char *vni_m =
9194                 MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9195         char *vni_v =
9196                 MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9197         int i, size = sizeof(vxlan_m->vni);
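        /*
         * Default flags 0x0c set the I and P bits (VNI and next protocol
         * present) per the VXLAN-GPE draft; overridden below when the user
         * supplied a flags mask.
         */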
9198         uint8_t flags_m = 0xff;
9199         uint8_t flags_v = 0xc;
9200         uint8_t m_protocol, v_protocol;
9201
9202         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9203                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9204                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9205                          MLX5_UDP_PORT_VXLAN_GPE);
9206         }
9207         if (!vxlan_v) {
9208                 vxlan_v = &dummy_vxlan_gpe_hdr;
9209                 vxlan_m = &dummy_vxlan_gpe_hdr;
9210         } else {
9211                 if (!vxlan_m)
9212                         vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9213         }
9214         memcpy(vni_m, vxlan_m->vni, size);
9215         for (i = 0; i < size; ++i)
9216                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9217         if (vxlan_m->flags) {
9218                 flags_m = vxlan_m->flags;
9219                 flags_v = vxlan_v->flags;
9220         }
9221         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9222         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9223         m_protocol = vxlan_m->protocol;
9224         v_protocol = vxlan_v->protocol;
9225         if (!m_protocol) {
9226                 /* Force next protocol to ensure the inner headers are parsed. */
9227                 if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
9228                         v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
9229                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
9230                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
9231                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
9232                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
9233                 if (v_protocol)
9234                         m_protocol = 0xFF;
9235         }
9236         MLX5_SET(fte_match_set_misc3, misc_m,
9237                  outer_vxlan_gpe_next_protocol, m_protocol);
9238         MLX5_SET(fte_match_set_misc3, misc_v,
9239                  outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
9240 }
9241
9242 /**
9243  * Add Geneve item to matcher and to the value.
9244  *
9245  * @param[in, out] matcher
9246  *   Flow matcher.
9247  * @param[in, out] key
9248  *   Flow matcher value.
9249  * @param[in] item
9250  *   Flow pattern to translate.
9251  * @param[in] pattern_flags
9252  *   Accumulated pattern flags.
9253  */
9254
9255 static void
9256 flow_dv_translate_item_geneve(void *matcher, void *key,
9257                               const struct rte_flow_item *item,
9258                               uint64_t pattern_flags)
9259 {
9260         static const struct rte_flow_item_geneve empty_geneve = {0,};
9261         const struct rte_flow_item_geneve *geneve_m = item->mask;
9262         const struct rte_flow_item_geneve *geneve_v = item->spec;
9263         /* GENEVE flow item validation allows a single tunnel item. */
9264         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9265         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9266         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9267         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9268         uint16_t gbhdr_m;
9269         uint16_t gbhdr_v;
9270         char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9271         char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9272         size_t size = sizeof(geneve_m->vni), i;
9273         uint16_t protocol_m, protocol_v;
9274
9275         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9276                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9277                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9278                          MLX5_UDP_PORT_GENEVE);
9279         }
9280         if (!geneve_v) {
9281                 geneve_v = &empty_geneve;
9282                 geneve_m = &empty_geneve;
9283         } else {
9284                 if (!geneve_m)
9285                         geneve_m = &rte_flow_item_geneve_mask;
9286         }
9287         memcpy(vni_m, geneve_m->vni, size);
9288         for (i = 0; i < size; ++i)
9289                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9290         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9291         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9292         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9293                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9294         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9295                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9296         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9297                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9298         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9299                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9300                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9301         protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
9302         protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
9303         if (!protocol_m) {
9304                 /* Force next protocol to prevent matcher duplication. */
9305                 protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
9306                 if (protocol_v)
9307                         protocol_m = 0xFFFF;
9308         }
9309         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
9310         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9311                  protocol_m & protocol_v);
9312 }
9313
9314 /**
9315  * Create Geneve TLV option resource.
9316  *
9317  * @param[in, out] dev
9318  *   Pointer to rte_eth_dev structure.
9319  * @param[in] item
9320  *   Flow pattern to translate.
9321  * @param[out] error
9322  *   Pointer to error structure.
9323  *
9324  * @return
9325  *   0 on success, a negative value otherwise.
9326  */
9330 int
9331 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9332                                              const struct rte_flow_item *item,
9333                                              struct rte_flow_error *error)
9334 {
9335         struct mlx5_priv *priv = dev->data->dev_private;
9336         struct mlx5_dev_ctx_shared *sh = priv->sh;
9337         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9338                         sh->geneve_tlv_option_resource;
9339         struct mlx5_devx_obj *obj;
9340         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9341         int ret = 0;
9342
9343         if (!geneve_opt_v)
9344                 return -1;
9345         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9346         if (geneve_opt_resource != NULL) {
9347                 if (geneve_opt_resource->option_class ==
9348                         geneve_opt_v->option_class &&
9349                         geneve_opt_resource->option_type ==
9350                         geneve_opt_v->option_type &&
9351                         geneve_opt_resource->length ==
9352                         geneve_opt_v->option_len) {
9353                         /* We already have GENEVE TLV option obj allocated. */
9354                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9355                                            __ATOMIC_RELAXED);
9356                 } else {
9357                         ret = rte_flow_error_set(error, ENOMEM,
9358                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9359                                 "Only one GENEVE TLV option supported");
9360                         goto exit;
9361                 }
9362         } else {
9363                 /* Create a GENEVE TLV object and resource. */
9364                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
9365                                 geneve_opt_v->option_class,
9366                                 geneve_opt_v->option_type,
9367                                 geneve_opt_v->option_len);
9368                 if (!obj) {
9369                         ret = rte_flow_error_set(error, ENODATA,
9370                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9371                                 "Failed to create GENEVE TLV Devx object");
9372                         goto exit;
9373                 }
9374                 sh->geneve_tlv_option_resource =
9375                                 mlx5_malloc(MLX5_MEM_ZERO,
9376                                                 sizeof(*geneve_opt_resource),
9377                                                 0, SOCKET_ID_ANY);
9378                 if (!sh->geneve_tlv_option_resource) {
9379                         claim_zero(mlx5_devx_cmd_destroy(obj));
9380                         ret = rte_flow_error_set(error, ENOMEM,
9381                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9382                                 "GENEVE TLV object memory allocation failed");
9383                         goto exit;
9384                 }
9385                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9386                 geneve_opt_resource->obj = obj;
9387                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9388                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9389                 geneve_opt_resource->length = geneve_opt_v->option_len;
9390                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9391                                 __ATOMIC_RELAXED);
9392         }
9393 exit:
9394         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9395         return ret;
9396 }
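
/*
 * Usage sketch (hypothetical caller, for illustration only): a single
 * GENEVE TLV option object is shared per device context, so registering
 * the same class/type/length again only increments the reference count,
 * while a different option is rejected.
 *
 *	struct rte_flow_error err;
 *
 *	if (flow_dev_geneve_tlv_option_resource_register(dev, item, &err))
 *		DRV_LOG(ERR, "GENEVE TLV option registration failed");
 */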
9397
9398 /**
9399  * Add Geneve TLV option item to matcher.
9400  *
9401  * @param[in, out] dev
9402  *   Pointer to rte_eth_dev structure.
9403  * @param[in, out] matcher
9404  *   Flow matcher.
9405  * @param[in, out] key
9406  *   Flow matcher value.
9407  * @param[in] item
9408  *   Flow pattern to translate.
9409  * @param[out] error
9410  *   Pointer to error structure.
9411  *
9412  * @return
9413  *   0 on success, a negative value otherwise.
9414  */
9412 static int
9413 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9414                                   void *key, const struct rte_flow_item *item,
9415                                   struct rte_flow_error *error)
9416 {
9417         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9418         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9419         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9420         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9421         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9422                         misc_parameters_3);
9423         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9424         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9425         int ret = 0;
9426
9427         if (!geneve_opt_v)
9428                 return -1;
9429         if (!geneve_opt_m)
9430                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9431         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9432                                                            error);
9433         if (ret) {
9434                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9435                 return ret;
9436         }
9437         /*
9438          * Set the option length in GENEVE header if not requested.
9439          * The GENEVE TLV option length is expressed by the option length field
9440          * in the GENEVE header.
9441          * If the option length was not requested but the GENEVE TLV option item
9442  *   is present, we set the option length field implicitly.
9443          */
9444         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9445                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9446                          MLX5_GENEVE_OPTLEN_MASK);
9447                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9448                          geneve_opt_v->option_len + 1);
9449         }
9450         MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
9451         MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
9452         /* Set the data. */
9453         if (geneve_opt_v->data) {
9454                 memcpy(&opt_data_key, geneve_opt_v->data,
9455                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9456                                 sizeof(opt_data_key)));
9457                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9458                                 sizeof(opt_data_key));
9459                 memcpy(&opt_data_mask, geneve_opt_m->data,
9460                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9461                                 sizeof(opt_data_mask)));
9462                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9463                                 sizeof(opt_data_mask));
9464                 MLX5_SET(fte_match_set_misc3, misc3_m,
9465                                 geneve_tlv_option_0_data,
9466                                 rte_be_to_cpu_32(opt_data_mask));
9467                 MLX5_SET(fte_match_set_misc3, misc3_v,
9468                                 geneve_tlv_option_0_data,
9469                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9470         }
9471         return ret;
9472 }
9473
9474 /**
9475  * Add MPLS item to matcher and to the value.
9476  *
9477  * @param[in, out] matcher
9478  *   Flow matcher.
9479  * @param[in, out] key
9480  *   Flow matcher value.
9481  * @param[in] item
9482  *   Flow pattern to translate.
9483  * @param[in] prev_layer
9484  *   The protocol layer indicated in previous item.
9485  * @param[in] inner
9486  *   Item is inner pattern.
9487  */
9488 static void
9489 flow_dv_translate_item_mpls(void *matcher, void *key,
9490                             const struct rte_flow_item *item,
9491                             uint64_t prev_layer,
9492                             int inner)
9493 {
9494         const uint32_t *in_mpls_m = item->mask;
9495         const uint32_t *in_mpls_v = item->spec;
9496         uint32_t *out_mpls_m = 0;
9497         uint32_t *out_mpls_v = 0;
9498         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9499         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9500         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9501                                      misc_parameters_2);
9502         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9503         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9504         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9505
9506         switch (prev_layer) {
9507         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9508                 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9509                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
9510                                  0xffff);
9511                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9512                                  MLX5_UDP_PORT_MPLS);
9513                 }
9514                 break;
9515         case MLX5_FLOW_LAYER_GRE:
9516                 /* Fall-through. */
9517         case MLX5_FLOW_LAYER_GRE_KEY:
9518                 if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
9519                         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
9520                                  0xffff);
9521                         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9522                                  RTE_ETHER_TYPE_MPLS);
9523                 }
9524                 break;
9525         default:
9526                 break;
9527         }
9528         if (!in_mpls_v)
9529                 return;
9530         if (!in_mpls_m)
9531                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9532         switch (prev_layer) {
9533         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9534                 out_mpls_m =
9535                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9536                                                  outer_first_mpls_over_udp);
9537                 out_mpls_v =
9538                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9539                                                  outer_first_mpls_over_udp);
9540                 break;
9541         case MLX5_FLOW_LAYER_GRE:
9542                 out_mpls_m =
9543                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9544                                                  outer_first_mpls_over_gre);
9545                 out_mpls_v =
9546                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9547                                                  outer_first_mpls_over_gre);
9548                 break;
9549         default:
9550                 /* Inner MPLS not over GRE is not supported. */
9551                 if (!inner) {
9552                         out_mpls_m =
9553                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9554                                                          misc2_m,
9555                                                          outer_first_mpls);
9556                         out_mpls_v =
9557                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9558                                                          misc2_v,
9559                                                          outer_first_mpls);
9560                 }
9561                 break;
9562         }
9563         if (out_mpls_m && out_mpls_v) {
9564                 *out_mpls_m = *in_mpls_m;
9565                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9566         }
9567 }
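
/*
 * Note (illustrative): for MPLS-over-UDP the translation above fills the
 * UDP destination port implicitly when the application left it unmasked,
 * e.g. a pattern of eth / ipv4 / udp / mpls ends up matching
 * udp_dport == MLX5_UDP_PORT_MPLS (6635, the IANA-assigned port) with a
 * full 0xffff mask, so unrelated UDP traffic cannot hit the rule.
 */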
9568
9569 /**
9570  * Add metadata register item to matcher
9571  *
9572  * @param[in, out] matcher
9573  *   Flow matcher.
9574  * @param[in, out] key
9575  *   Flow matcher value.
9576  * @param[in] reg_type
9577  *   Type of device metadata register.
9578  * @param[in] data
9579  *   Register value to match.
9580  * @param[in] mask
9581  *   Register mask.
9582  */
9583 static void
9584 flow_dv_match_meta_reg(void *matcher, void *key,
9585                        enum modify_reg reg_type,
9586                        uint32_t data, uint32_t mask)
9587 {
9588         void *misc2_m =
9589                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9590         void *misc2_v =
9591                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9592         uint32_t temp;
9593
9594         data &= mask;
9595         switch (reg_type) {
9596         case REG_A:
9597                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9598                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9599                 break;
9600         case REG_B:
9601                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9602                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9603                 break;
9604         case REG_C_0:
9605                 /*
9606                  * The metadata register C0 field might be divided into
9607                  * source vport index and META item value, we should set
9608                  * this field according to specified mask, not as whole one.
9609                  */
9610                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9611                 temp |= mask;
9612                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9613                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9614                 temp &= ~mask;
9615                 temp |= data;
9616                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9617                 break;
9618         case REG_C_1:
9619                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9620                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9621                 break;
9622         case REG_C_2:
9623                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9624                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9625                 break;
9626         case REG_C_3:
9627                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9628                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9629                 break;
9630         case REG_C_4:
9631                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9632                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9633                 break;
9634         case REG_C_5:
9635                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9636                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9637                 break;
9638         case REG_C_6:
9639                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9640                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9641                 break;
9642         case REG_C_7:
9643                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9644                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9645                 break;
9646         default:
9647                 MLX5_ASSERT(false);
9648                 break;
9649         }
9650 }
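
/*
 * Illustrative sketch of the REG_C_0 read-modify-write above, with
 * hypothetical sub-field masks: if a vport tag owns the low half of C0
 * and META owns the high half, two consecutive calls must merge rather
 * than overwrite each other (values are assumed aligned to their masks):
 *
 *	flow_dv_match_meta_reg(matcher, key, REG_C_0, tag, 0x0000ffff);
 *	flow_dv_match_meta_reg(matcher, key, REG_C_0, meta << 16, 0xffff0000);
 *	// metadata_reg_c_0 now carries both sub-fields; a plain
 *	// MLX5_SET() in the second call would have dropped the first.
 */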
9651
9652 /**
9653  * Add MARK item to matcher
9654  *
9655  * @param[in] dev
9656  *   The device to configure through.
9657  * @param[in, out] matcher
9658  *   Flow matcher.
9659  * @param[in, out] key
9660  *   Flow matcher value.
9661  * @param[in] item
9662  *   Flow pattern to translate.
9663  */
9664 static void
9665 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9666                             void *matcher, void *key,
9667                             const struct rte_flow_item *item)
9668 {
9669         struct mlx5_priv *priv = dev->data->dev_private;
9670         const struct rte_flow_item_mark *mark;
9671         uint32_t value;
9672         uint32_t mask;
9673
9674         mark = item->mask ? (const void *)item->mask :
9675                             &rte_flow_item_mark_mask;
9676         mask = mark->id & priv->sh->dv_mark_mask;
9677         mark = (const void *)item->spec;
9678         MLX5_ASSERT(mark);
9679         value = mark->id & priv->sh->dv_mark_mask & mask;
9680         if (mask) {
9681                 enum modify_reg reg;
9682
9683                 /* Get the metadata register index for the mark. */
9684                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9685                 MLX5_ASSERT(reg > 0);
9686                 if (reg == REG_C_0) {
9687                         struct mlx5_priv *priv = dev->data->dev_private;
9688                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9689                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9690
9691                         mask &= msk_c0;
9692                         mask <<= shl_c0;
9693                         value <<= shl_c0;
9694                 }
9695                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9696         }
9697 }
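
/*
 * Worked example (hypothetical register mask): rte_bsf32() returns the
 * index of the least significant set bit, so with
 * dv_regc0_mask == 0x00ffff00 the shift is rte_bsf32(0x00ffff00) == 8,
 * and both the MARK value and its mask are moved up by 8 bits to land
 * in the REG_C_0 sub-field available to the application.
 */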
9698
9699 /**
9700  * Add META item to matcher
9701  *
9702  * @param[in] dev
9703  *   The device to configure through.
9704  * @param[in, out] matcher
9705  *   Flow matcher.
9706  * @param[in, out] key
9707  *   Flow matcher value.
9708  * @param[in] attr
9709  *   Attributes of flow that includes this item.
9710  * @param[in] item
9711  *   Flow pattern to translate.
9712  */
9713 static void
9714 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9715                             void *matcher, void *key,
9716                             const struct rte_flow_attr *attr,
9717                             const struct rte_flow_item *item)
9718 {
9719         const struct rte_flow_item_meta *meta_m;
9720         const struct rte_flow_item_meta *meta_v;
9721
9722         meta_m = (const void *)item->mask;
9723         if (!meta_m)
9724                 meta_m = &rte_flow_item_meta_mask;
9725         meta_v = (const void *)item->spec;
9726         if (meta_v) {
9727                 int reg;
9728                 uint32_t value = meta_v->data;
9729                 uint32_t mask = meta_m->data;
9730
9731                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9732                 if (reg < 0)
9733                         return;
9734                 MLX5_ASSERT(reg != REG_NON);
9735                 if (reg == REG_C_0) {
9736                         struct mlx5_priv *priv = dev->data->dev_private;
9737                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9738                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9739
9740                         mask &= msk_c0;
9741                         mask <<= shl_c0;
9742                         value <<= shl_c0;
9743                 }
9744                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9745         }
9746 }
9747
9748 /**
9749  * Add vport metadata Reg C0 item to matcher
9750  *
9751  * @param[in, out] matcher
9752  *   Flow matcher.
9753  * @param[in, out] key
9754  *   Flow matcher value.
9755  * @param[in] value
9756  *   Register value to match.
9757  * @param[in] mask
9758  *   Register mask.
9757  */
9758 static void
9759 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9760                                   uint32_t value, uint32_t mask)
9761 {
9762         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9763 }
9764
9765 /**
9766  * Add tag item to matcher
9767  *
9768  * @param[in] dev
9769  *   The device to configure through.
9770  * @param[in, out] matcher
9771  *   Flow matcher.
9772  * @param[in, out] key
9773  *   Flow matcher value.
9774  * @param[in] item
9775  *   Flow pattern to translate.
9776  */
9777 static void
9778 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9779                                 void *matcher, void *key,
9780                                 const struct rte_flow_item *item)
9781 {
9782         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9783         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9784         uint32_t mask, value;
9785
9786         MLX5_ASSERT(tag_v);
9787         value = tag_v->data;
9788         mask = tag_m ? tag_m->data : UINT32_MAX;
9789         if (tag_v->id == REG_C_0) {
9790                 struct mlx5_priv *priv = dev->data->dev_private;
9791                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9792                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9793
9794                 mask &= msk_c0;
9795                 mask <<= shl_c0;
9796                 value <<= shl_c0;
9797         }
9798         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9799 }
9800
9801 /**
9802  * Add TAG item to matcher
9803  *
9804  * @param[in] dev
9805  *   The device to configure through.
9806  * @param[in, out] matcher
9807  *   Flow matcher.
9808  * @param[in, out] key
9809  *   Flow matcher value.
9810  * @param[in] item
9811  *   Flow pattern to translate.
9812  */
9813 static void
9814 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9815                            void *matcher, void *key,
9816                            const struct rte_flow_item *item)
9817 {
9818         const struct rte_flow_item_tag *tag_v = item->spec;
9819         const struct rte_flow_item_tag *tag_m = item->mask;
9820         enum modify_reg reg;
9821
9822         MLX5_ASSERT(tag_v);
9823         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9824         /* Get the metadata register index for the tag. */
9825         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9826         MLX5_ASSERT(reg > 0);
9827         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9828 }
9829
9830 /**
9831  * Add source vport match to the specified matcher.
9832  *
9833  * @param[in, out] matcher
9834  *   Flow matcher.
9835  * @param[in, out] key
9836  *   Flow matcher value.
9837  * @param[in] port
9838  *   Source vport value to match.
9839  * @param[in] mask
9840  *   Mask to apply.
9841  */
9842 static void
9843 flow_dv_translate_item_source_vport(void *matcher, void *key,
9844                                     int16_t port, uint16_t mask)
9845 {
9846         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9847         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9848
9849         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9850         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9851 }
9852
9853 /**
9854  * Translate port-id item to eswitch match on port-id.
9855  *
9856  * @param[in] dev
9857  *   The device to configure through.
9858  * @param[in, out] matcher
9859  *   Flow matcher.
9860  * @param[in, out] key
9861  *   Flow matcher value.
9862  * @param[in] item
9863  *   Flow pattern to translate.
9864  * @param[in] attr
9865  *   Flow attributes.
9866  *
9867  * @return
9868  *   0 on success, a negative errno value otherwise.
9869  */
9870 static int
9871 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9872                                void *key, const struct rte_flow_item *item,
9873                                const struct rte_flow_attr *attr)
9874 {
9875         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9876         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9877         struct mlx5_priv *priv;
9878         uint16_t mask, id;
9879
9880         if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
9881                 flow_dv_translate_item_source_vport(matcher, key,
9882                         flow_dv_get_esw_manager_vport_id(dev), 0xffff);
9883                 return 0;
9884         }
9885         mask = pid_m ? pid_m->id : 0xffff;
9886         id = pid_v ? pid_v->id : dev->data->port_id;
9887         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9888         if (!priv)
9889                 return -rte_errno;
9890         /*
9891          * Translate to vport field or to metadata, depending on mode.
9892          * Kernel can use either misc.source_port or half of C0 metadata
9893          * register.
9894          */
9895         if (priv->vport_meta_mask) {
9896                 /*
9897                  * Provide the hint for SW steering library
9898                  * to insert the flow into ingress domain and
9899                  * save the extra vport match.
9900                  */
9901                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9902                     priv->pf_bond < 0 && attr->transfer)
9903                         flow_dv_translate_item_source_vport
9904                                 (matcher, key, priv->vport_id, mask);
9905                 /*
9906                  * We should always set the vport metadata register,
9907                  * otherwise the SW steering library can drop
9908                  * the rule if wire vport metadata value is not zero,
9909                  * it depends on kernel configuration.
9910                  */
9911                 flow_dv_translate_item_meta_vport(matcher, key,
9912                                                   priv->vport_meta_tag,
9913                                                   priv->vport_meta_mask);
9914         } else {
9915                 flow_dv_translate_item_source_vport(matcher, key,
9916                                                     priv->vport_id, mask);
9917         }
9918         return 0;
9919 }
9920
9921 /**
9922  * Add ICMP6 item to matcher and to the value.
9923  *
9924  * @param[in, out] matcher
9925  *   Flow matcher.
9926  * @param[in, out] key
9927  *   Flow matcher value.
9928  * @param[in] item
9929  *   Flow pattern to translate.
9930  * @param[in] inner
9931  *   Item is inner pattern.
9932  */
9933 static void
9934 flow_dv_translate_item_icmp6(void *matcher, void *key,
9935                               const struct rte_flow_item *item,
9936                               int inner)
9937 {
9938         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9939         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9940         void *headers_m;
9941         void *headers_v;
9942         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9943                                      misc_parameters_3);
9944         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9945         if (inner) {
9946                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9947                                          inner_headers);
9948                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9949         } else {
9950                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9951                                          outer_headers);
9952                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9953         }
9954         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9955         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9956         if (!icmp6_v)
9957                 return;
9958         if (!icmp6_m)
9959                 icmp6_m = &rte_flow_item_icmp6_mask;
9960         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9961         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9962                  icmp6_v->type & icmp6_m->type);
9963         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9964         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9965                  icmp6_v->code & icmp6_m->code);
9966 }
9967
9968 /**
9969  * Add ICMP item to matcher and to the value.
9970  *
9971  * @param[in, out] matcher
9972  *   Flow matcher.
9973  * @param[in, out] key
9974  *   Flow matcher value.
9975  * @param[in] item
9976  *   Flow pattern to translate.
9977  * @param[in] inner
9978  *   Item is inner pattern.
9979  */
9980 static void
9981 flow_dv_translate_item_icmp(void *matcher, void *key,
9982                             const struct rte_flow_item *item,
9983                             int inner)
9984 {
9985         const struct rte_flow_item_icmp *icmp_m = item->mask;
9986         const struct rte_flow_item_icmp *icmp_v = item->spec;
9987         uint32_t icmp_header_data_m = 0;
9988         uint32_t icmp_header_data_v = 0;
9989         void *headers_m;
9990         void *headers_v;
9991         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9992                                      misc_parameters_3);
9993         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9994         if (inner) {
9995                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9996                                          inner_headers);
9997                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9998         } else {
9999                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
10000                                          outer_headers);
10001                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10002         }
10003         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
10004         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
10005         if (!icmp_v)
10006                 return;
10007         if (!icmp_m)
10008                 icmp_m = &rte_flow_item_icmp_mask;
10009         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
10010                  icmp_m->hdr.icmp_type);
10011         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
10012                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
10013         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
10014                  icmp_m->hdr.icmp_code);
10015         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
10016                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
10017         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
10018         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
10019         if (icmp_header_data_m) {
10020                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
10021                 icmp_header_data_v |=
10022                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
10023                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
10024                          icmp_header_data_m);
10025                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
10026                          icmp_header_data_v & icmp_header_data_m);
10027         }
10028 }
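
/*
 * Packing note (illustrative): icmp_header_data carries the ICMP
 * identifier in its upper 16 bits and the sequence number in the lower
 * 16 bits, so matching on the identifier alone gives:
 *
 *	icmp_header_data_m == (0xffffu << 16);	// ident full, seq wildcard
 */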
10029
10030 /**
10031  * Add GTP item to matcher and to the value.
10032  *
10033  * @param[in, out] matcher
10034  *   Flow matcher.
10035  * @param[in, out] key
10036  *   Flow matcher value.
10037  * @param[in] item
10038  *   Flow pattern to translate.
10039  * @param[in] inner
10040  *   Item is inner pattern.
10041  */
10042 static void
10043 flow_dv_translate_item_gtp(void *matcher, void *key,
10044                            const struct rte_flow_item *item, int inner)
10045 {
10046         const struct rte_flow_item_gtp *gtp_m = item->mask;
10047         const struct rte_flow_item_gtp *gtp_v = item->spec;
10048         void *headers_m;
10049         void *headers_v;
10050         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
10051                                      misc_parameters_3);
10052         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
10053         uint16_t dport = RTE_GTPU_UDP_PORT;
10054
10055         if (inner) {
10056                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
10057                                          inner_headers);
10058                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
10059         } else {
10060                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
10061                                          outer_headers);
10062                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10063         }
10064         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
10065                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
10066                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
10067         }
10068         if (!gtp_v)
10069                 return;
10070         if (!gtp_m)
10071                 gtp_m = &rte_flow_item_gtp_mask;
10072         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
10073                  gtp_m->v_pt_rsv_flags);
10074         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
10075                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
10076         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
10077         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
10078                  gtp_v->msg_type & gtp_m->msg_type);
10079         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
10080                  rte_be_to_cpu_32(gtp_m->teid));
10081         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
10082                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
10083 }
10084
10085 /**
10086  * Add GTP PSC item to matcher.
10087  *
10088  * @param[in, out] matcher
10089  *   Flow matcher.
10090  * @param[in, out] key
10091  *   Flow matcher value.
10092  * @param[in] item
10093  *   Flow pattern to translate.
10094  */
10095 static int
10096 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
10097                                const struct rte_flow_item *item)
10098 {
10099         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
10100         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
10101         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
10102                         misc_parameters_3);
10103         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
10104         union {
10105                 uint32_t w32;
10106                 struct {
10107                         uint16_t seq_num;
10108                         uint8_t npdu_num;
10109                         uint8_t next_ext_header_type;
10110                 };
10111         } dw_2;
10112         uint8_t gtp_flags;
10113
10114         /* Always set the E-flag match to one, regardless of GTP item settings. */
10115         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
10116         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
10117         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
10118         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
10119         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
10120         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
10121         /* Set next extension header type. */
10122         dw_2.seq_num = 0;
10123         dw_2.npdu_num = 0;
10124         dw_2.next_ext_header_type = 0xff;
10125         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
10126                  rte_cpu_to_be_32(dw_2.w32));
10127         dw_2.seq_num = 0;
10128         dw_2.npdu_num = 0;
10129         dw_2.next_ext_header_type = 0x85;
10130         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
10131                  rte_cpu_to_be_32(dw_2.w32));
10132         if (gtp_psc_v) {
10133                 union {
10134                         uint32_t w32;
10135                         struct {
10136                                 uint8_t len;
10137                                 uint8_t type_flags;
10138                                 uint8_t qfi;
10139                                 uint8_t reserved;
10140                         };
10141                 } dw_0;
10142
10143                 /* Set extension header PDU type and QoS. */
10144                 if (!gtp_psc_m)
10145                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
10146                 dw_0.w32 = 0;
10147                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
10148                 dw_0.qfi = gtp_psc_m->hdr.qfi;
10149                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
10150                          rte_cpu_to_be_32(dw_0.w32));
10151                 dw_0.w32 = 0;
10152                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
10153                                                         gtp_psc_m->hdr.type);
10154                 dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
10155                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
10156                          rte_cpu_to_be_32(dw_0.w32));
10157         }
10158         return 0;
10159 }
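
/*
 * Layout sketch (assuming a little-endian host): in the dw_2 union above
 * next_ext_header_type occupies the most significant byte of w32, so with
 * seq_num == 0 and npdu_num == 0 the value 0x85 (PDU session container)
 * yields w32 == 0x85000000, and the rte_cpu_to_be_32() conversion places
 * it at the wire position of the last byte of GTP-U DW2.
 */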
10160
10161 /**
10162  * Add eCPRI item to matcher and to the value.
10163  *
10164  * @param[in] dev
10165  *   The device to configure through.
10166  * @param[in, out] matcher
10167  *   Flow matcher.
10168  * @param[in, out] key
10169  *   Flow matcher value.
10170  * @param[in] item
10171  *   Flow pattern to translate.
10172  * @param[in] last_item
10173  *   Last item flags.
10174  */
10175 static void
10176 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
10177                              void *key, const struct rte_flow_item *item,
10178                              uint64_t last_item)
10179 {
10180         struct mlx5_priv *priv = dev->data->dev_private;
10181         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
10182         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
10183         struct rte_ecpri_common_hdr common;
10184         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
10185                                      misc_parameters_4);
10186         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
10187         uint32_t *samples;
10188         void *dw_m;
10189         void *dw_v;
10190
10191         /*
10192          * In case of eCPRI over Ethernet, if EtherType is not specified,
10193          * match on eCPRI EtherType implicitly.
10194          */
10195         if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
10196                 void *hdrs_m, *hdrs_v, *l2m, *l2v;
10197
10198                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
10199                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10200                 l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
10201                 l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
10202                 if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
10203                         *(uint16_t *)l2m = UINT16_MAX;
10204                         *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
10205                 }
10206         }
10207         if (!ecpri_v)
10208                 return;
10209         if (!ecpri_m)
10210                 ecpri_m = &rte_flow_item_ecpri_mask;
10211         /*
10212          * At most four DW samples are supported in a single matcher now.
10213          * Two of them are used for eCPRI matching:
10214          * 1. Type: one byte, mask should be 0x00ff0000 in network order
10215          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
10216          *    if any.
10217          */
10218         if (!ecpri_m->hdr.common.u32)
10219                 return;
10220         samples = priv->sh->ecpri_parser.ids;
10221         /* Need to take the whole DW as the mask to fill the entry. */
10222         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10223                             prog_sample_field_value_0);
10224         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10225                             prog_sample_field_value_0);
10226         /* Already big endian (network order) in the header. */
10227         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
10228         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
10229         /* Sample#0, used for matching type, offset 0. */
10230         MLX5_SET(fte_match_set_misc4, misc4_m,
10231                  prog_sample_field_id_0, samples[0]);
10232         /* It makes no sense to set the sample ID in the mask field. */
10233         MLX5_SET(fte_match_set_misc4, misc4_v,
10234                  prog_sample_field_id_0, samples[0]);
10235         /*
10236          * Checking if message body part needs to be matched.
10237          * Some wildcard rules only matching type field should be supported.
10238          */
10239         if (ecpri_m->hdr.dummy[0]) {
10240                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
10241                 switch (common.type) {
10242                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10243                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10244                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10245                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10246                                             prog_sample_field_value_1);
10247                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10248                                             prog_sample_field_value_1);
10249                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10250                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10251                                             ecpri_m->hdr.dummy[0];
10252                         /* Sample#1, to match message body, offset 4. */
10253                         MLX5_SET(fte_match_set_misc4, misc4_m,
10254                                  prog_sample_field_id_1, samples[1]);
10255                         MLX5_SET(fte_match_set_misc4, misc4_v,
10256                                  prog_sample_field_id_1, samples[1]);
10257                         break;
10258                 default:
10259                         /* Others, do not match any sample ID. */
10260                         break;
10261                 }
10262         }
10263 }
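
/*
 * Sample mapping note (illustrative): the eCPRI parser exposes the header
 * through programmable sample IDs; sample #0 covers the common header DW
 * (including the type byte) and sample #1 the first message-body DW, so a
 * rule matching both the type and a message ID consumes two of the four
 * DW samples available per matcher.
 */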
10264
10265 /*
10266  * Add connection tracking status item to matcher
10267  *
10268  * @param[in] dev
10269  *   The device to configure through.
10270  * @param[in, out] matcher
10271  *   Flow matcher.
10272  * @param[in, out] key
10273  *   Flow matcher value.
10274  * @param[in] item
10275  *   Flow pattern to translate.
10276  */
10277 static void
10278 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10279                               void *matcher, void *key,
10280                               const struct rte_flow_item *item)
10281 {
10282         uint32_t reg_value = 0;
10283         int reg_id;
10284         /* 8 LSBs are 0b11000011; the middle 4 bits are reserved. */
10285         uint32_t reg_mask = 0;
10286         const struct rte_flow_item_conntrack *spec = item->spec;
10287         const struct rte_flow_item_conntrack *mask = item->mask;
10288         uint32_t flags;
10289         struct rte_flow_error error;
10290
10291         if (!mask)
10292                 mask = &rte_flow_item_conntrack_mask;
10293         if (!spec || !mask->flags)
10294                 return;
10295         flags = spec->flags & mask->flags;
10296         /* The conflict should be checked in the validation. */
10297         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10298                 reg_value |= MLX5_CT_SYNDROME_VALID;
10299         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10300                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10301         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10302                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10303         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10304                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10305         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10306                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10307         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10308                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10309                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10310                 reg_mask |= 0xc0;
10311         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10312                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10313         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10314                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10315         /* The REG_C_x value could be saved during startup. */
10316         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10317         if (reg_id == REG_NON)
10318                 return;
10319         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10320                                reg_value, reg_mask);
10321 }
10322
10323 static void
10324 flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
10325                             const struct rte_flow_item *item,
10326                             struct mlx5_flow *dev_flow, bool is_inner)
10327 {
10328         const struct rte_flow_item_flex *spec =
10329                 (const struct rte_flow_item_flex *)item->spec;
10330         int index = mlx5_flex_acquire_index(dev, spec->handle, false);
10331
10332         MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
10333         if (index < 0)
10334                 return;
10335         if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
10336                 /* Don't count both inner and outer flex items in one rule. */
10337                 if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
10338                         MLX5_ASSERT(false);
10339                 dev_flow->handle->flex_item |= (uint8_t)RTE_BIT32(index);
10340         }
10341         mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
10342 }
10343
10344 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10345
10346 #define HEADER_IS_ZERO(match_criteria, headers)                              \
10347         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
10348                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
10349
10350 /**
10351  * Calculate flow matcher enable bitmap.
10352  *
10353  * @param match_criteria
10354  *   Pointer to flow matcher criteria.
10355  *
10356  * @return
10357  *   Bitmap of enabled fields.
10358  */
10359 static uint8_t
10360 flow_dv_matcher_enable(uint32_t *match_criteria)
10361 {
10362         uint8_t match_criteria_enable;
10363
10364         match_criteria_enable =
10365                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10366                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10367         match_criteria_enable |=
10368                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10369                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10370         match_criteria_enable |=
10371                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10372                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10373         match_criteria_enable |=
10374                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10375                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10376         match_criteria_enable |=
10377                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10378                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10379         match_criteria_enable |=
10380                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10381                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10382         match_criteria_enable |=
10383                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10384                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10385         return match_criteria_enable;
10386 }
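
/*
 * Example (illustrative): a matcher that only populates outer_headers
 * and misc_parameters_2 yields
 *
 *	match_criteria_enable ==
 *		(1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *		(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT)
 *
 * i.e. one bit per non-zero criteria block.
 */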
10387
10388 static void
10389 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10390 {
10391         /*
10392          * Check flow matching criteria first, subtract misc5/4 length if flow
10393          * doesn't own misc5/4 parameters. In some old rdma-core releases,
10394          * misc5/4 are not supported, and matcher creation failure is expected
10395          * w/o subtraction. If misc5 is provided, misc4 must be counted in since
10396          * misc5 is right after misc4.
10397          */
10398         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10399                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10400                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10401                 if (!(match_criteria & (1 <<
10402                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10403                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10404                 }
10405         }
10406 }
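
/*
 * Sizing sketch (illustrative): when neither MISC4 nor MISC5 is enabled,
 * the buffer handed to matcher creation shrinks by both trailing blocks:
 *
 *	size_t size = MLX5_ST_SZ_BYTES(fte_match_param);
 *
 *	__flow_dv_adjust_buf_size(&size, match_criteria_enable);
 *	// size == MLX5_ST_SZ_BYTES(fte_match_param) -
 *	//	   MLX5_ST_SZ_BYTES(fte_match_set_misc5) -
 *	//	   MLX5_ST_SZ_BYTES(fte_match_set_misc4)
 */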
10407
10408 static struct mlx5_list_entry *
10409 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10410                          struct mlx5_list_entry *entry, void *cb_ctx)
10411 {
10412         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10413         struct mlx5_flow_dv_matcher *ref = ctx->data;
10414         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10415                                                             typeof(*tbl), tbl);
10416         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10417                                                             sizeof(*resource),
10418                                                             0, SOCKET_ID_ANY);
10419
10420         if (!resource) {
10421                 rte_flow_error_set(ctx->error, ENOMEM,
10422                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10423                                    "cannot create matcher");
10424                 return NULL;
10425         }
10426         memcpy(resource, entry, sizeof(*resource));
10427         resource->tbl = &tbl->tbl;
10428         return &resource->entry;
10429 }
10430
10431 static void
10432 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10433                              struct mlx5_list_entry *entry)
10434 {
10435         mlx5_free(entry);
10436 }
10437
10438 struct mlx5_list_entry *
10439 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10440 {
10441         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10442         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10443         struct rte_eth_dev *dev = ctx->dev;
10444         struct mlx5_flow_tbl_data_entry *tbl_data;
10445         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10446         struct rte_flow_error *error = ctx->error;
10447         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10448         struct mlx5_flow_tbl_resource *tbl;
10449         void *domain;
10450         uint32_t idx = 0;
10451         int ret;
10452
10453         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10454         if (!tbl_data) {
10455                 rte_flow_error_set(error, ENOMEM,
10456                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10457                                    NULL,
10458                                    "cannot allocate flow table data entry");
10459                 return NULL;
10460         }
10461         tbl_data->idx = idx;
10462         tbl_data->tunnel = tt_prm->tunnel;
10463         tbl_data->group_id = tt_prm->group_id;
10464         tbl_data->external = !!tt_prm->external;
10465         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10466         tbl_data->is_egress = !!key.is_egress;
10467         tbl_data->is_transfer = !!key.is_fdb;
10468         tbl_data->dummy = !!key.dummy;
10469         tbl_data->level = key.level;
10470         tbl_data->id = key.id;
10471         tbl = &tbl_data->tbl;
10472         if (key.dummy)
10473                 return &tbl_data->entry;
10474         if (key.is_fdb)
10475                 domain = sh->fdb_domain;
10476         else if (key.is_egress)
10477                 domain = sh->tx_domain;
10478         else
10479                 domain = sh->rx_domain;
10480         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10481         if (ret) {
10482                 rte_flow_error_set(error, ENOMEM,
10483                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10484                                    NULL, "cannot create flow table object");
10485                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10486                 return NULL;
10487         }
10488         if (key.level != 0) {
10489                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10490                                         (tbl->obj, &tbl_data->jump.action);
10491                 if (ret) {
10492                         rte_flow_error_set(error, ENOMEM,
10493                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10494                                            NULL,
10495                                            "cannot create flow jump action");
10496                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10497                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10498                         return NULL;
10499                 }
10500         }
10501         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10502               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10503               key.level, key.id);
10504         tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10505                                               flow_dv_matcher_create_cb,
10506                                               flow_dv_matcher_match_cb,
10507                                               flow_dv_matcher_remove_cb,
10508                                               flow_dv_matcher_clone_cb,
10509                                               flow_dv_matcher_clone_free_cb);
10510         if (!tbl_data->matchers) {
10511                 rte_flow_error_set(error, ENOMEM,
10512                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10513                                    NULL,
10514                                    "cannot create tbl matcher list");
10515                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10516                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10517                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10518                 return NULL;
10519         }
10520         return &tbl_data->entry;
10521 }
10522
10523 int
10524 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10525                      void *cb_ctx)
10526 {
10527         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10528         struct mlx5_flow_tbl_data_entry *tbl_data =
10529                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10530         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10531
10532         return tbl_data->level != key.level ||
10533                tbl_data->id != key.id ||
10534                tbl_data->dummy != key.dummy ||
10535                tbl_data->is_transfer != !!key.is_fdb ||
10536                tbl_data->is_egress != !!key.is_egress;
10537 }
10538
10539 struct mlx5_list_entry *
10540 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10541                       void *cb_ctx)
10542 {
10543         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10544         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10545         struct mlx5_flow_tbl_data_entry *tbl_data;
10546         struct rte_flow_error *error = ctx->error;
10547         uint32_t idx = 0;
10548
10549         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10550         if (!tbl_data) {
10551                 rte_flow_error_set(error, ENOMEM,
10552                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10553                                    NULL,
10554                                    "cannot allocate flow table data entry");
10555                 return NULL;
10556         }
10557         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10558         tbl_data->idx = idx;
10559         return &tbl_data->entry;
10560 }
10561
10562 void
10563 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10564 {
10565         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10566         struct mlx5_flow_tbl_data_entry *tbl_data =
10567                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10568
10569         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10570 }
10571
10572 /**
10573  * Get a flow table.
10574  *
10575  * @param[in, out] dev
10576  *   Pointer to rte_eth_dev structure.
10577  * @param[in] table_level
10578  *   Table level to use.
10579  * @param[in] egress
10580  *   Direction of the table.
10581  * @param[in] transfer
10582  *   E-Switch or NIC flow.
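 * @param[in] external
 *   Table is created on behalf of an application rule.
 * @param[in] tunnel
 *   Tunnel offload context, NULL for a regular flow.
 * @param[in] group_id
 *   Flow group id, used for the tunnel offload table registration.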
10583  * @param[in] dummy
10584  *   Dummy entry for dv API.
10585  * @param[in] table_id
10586  *   Table id to use.
10587  * @param[out] error
10588  *   pointer to error structure.
10589  *
10590  * @return
10591  *   Returns the table resource based on the key, NULL in case of failure.
10592  */
10593 struct mlx5_flow_tbl_resource *
10594 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10595                          uint32_t table_level, uint8_t egress,
10596                          uint8_t transfer,
10597                          bool external,
10598                          const struct mlx5_flow_tunnel *tunnel,
10599                          uint32_t group_id, uint8_t dummy,
10600                          uint32_t table_id,
10601                          struct rte_flow_error *error)
10602 {
10603         struct mlx5_priv *priv = dev->data->dev_private;
10604         union mlx5_flow_tbl_key table_key = {
10605                 {
10606                         .level = table_level,
10607                         .id = table_id,
10608                         .reserved = 0,
10609                         .dummy = !!dummy,
10610                         .is_fdb = !!transfer,
10611                         .is_egress = !!egress,
10612                 }
10613         };
10614         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10615                 .tunnel = tunnel,
10616                 .group_id = group_id,
10617                 .external = external,
10618         };
10619         struct mlx5_flow_cb_ctx ctx = {
10620                 .dev = dev,
10621                 .error = error,
10622                 .data = &table_key.v64,
10623                 .data2 = &tt_prm,
10624         };
10625         struct mlx5_list_entry *entry;
10626         struct mlx5_flow_tbl_data_entry *tbl_data;
10627
10628         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10629         if (!entry) {
10630                 rte_flow_error_set(error, ENOMEM,
10631                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10632                                    "cannot get table");
10633                 return NULL;
10634         }
10635         DRV_LOG(DEBUG, "table_level %u table_id %u "
10636                 "tunnel %u group %u registered.",
10637                 table_level, table_id,
10638                 tunnel ? tunnel->tunnel_id : 0, group_id);
10639         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10640         return &tbl_data->tbl;
10641 }
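
/*
 * Usage sketch (editorial illustration, not part of the driver): acquiring
 * a plain ingress NIC table at level 1 with the tunnel offload parameters
 * left neutral, then dropping the reference again. Error handling and the
 * use of the returned object are elided.
 *
 *	struct mlx5_flow_tbl_resource *tbl;
 *
 *	tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 0, 0, 0,
 *				       error);
 *	if (!tbl)
 *		return -rte_errno;
 *	... program rules against tbl->obj ...
 *	flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
 */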
10642
10643 void
10644 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10645 {
10646         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10647         struct mlx5_flow_tbl_data_entry *tbl_data =
10648                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10649
10650         MLX5_ASSERT(entry && sh);
10651         if (tbl_data->jump.action)
10652                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10653         if (tbl_data->tbl.obj)
10654                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10655         if (tbl_data->tunnel_offload && tbl_data->external) {
10656                 struct mlx5_list_entry *he;
10657                 struct mlx5_hlist *tunnel_grp_hash;
10658                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10659                 union tunnel_tbl_key tunnel_key = {
10660                         .tunnel_id = tbl_data->tunnel ?
10661                                         tbl_data->tunnel->tunnel_id : 0,
10662                         .group = tbl_data->group_id
10663                 };
10664                 uint32_t table_level = tbl_data->level;
10665                 struct mlx5_flow_cb_ctx ctx = {
10666                         .data = (void *)&tunnel_key.val,
10667                 };
10668
10669                 tunnel_grp_hash = tbl_data->tunnel ?
10670                                         tbl_data->tunnel->groups :
10671                                         thub->groups;
10672                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10673                 if (he)
10674                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10675                 DRV_LOG(DEBUG,
10676                         "table_level %u id %u tunnel %u group %u released.",
10677                         table_level,
10678                         tbl_data->id,
10679                         tbl_data->tunnel ?
10680                         tbl_data->tunnel->tunnel_id : 0,
10681                         tbl_data->group_id);
10682         }
10683         if (tbl_data->matchers)
10684                 mlx5_list_destroy(tbl_data->matchers);
10685         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10686 }
10687
10688 /**
10689  * Release a flow table.
10690  *
10691  * @param[in] sh
10692  *   Pointer to device shared structure.
10693  * @param[in] tbl
10694  *   Table resource to be released.
10695  *
10696  * @return
10697  *   Returns 0 if the table was released, 1 otherwise.
10698  */
10699 static int
10700 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10701                              struct mlx5_flow_tbl_resource *tbl)
10702 {
10703         struct mlx5_flow_tbl_data_entry *tbl_data =
10704                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10705
10706         if (!tbl)
10707                 return 0;
10708         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10709 }
10710
10711 int
10712 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10713                          struct mlx5_list_entry *entry, void *cb_ctx)
10714 {
10715         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10716         struct mlx5_flow_dv_matcher *ref = ctx->data;
10717         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10718                                                         entry);
10719
10720         return cur->crc != ref->crc ||
10721                cur->priority != ref->priority ||
10722                memcmp((const void *)cur->mask.buf,
10723                       (const void *)ref->mask.buf, ref->mask.size);
10724 }
10725
10726 struct mlx5_list_entry *
10727 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10728 {
10729         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10730         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10731         struct mlx5_flow_dv_matcher *ref = ctx->data;
10732         struct mlx5_flow_dv_matcher *resource;
10733         struct mlx5dv_flow_matcher_attr dv_attr = {
10734                 .type = IBV_FLOW_ATTR_NORMAL,
10735                 .match_mask = (void *)&ref->mask,
10736         };
10737         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10738                                                             typeof(*tbl), tbl);
10739         int ret;
10740
10741         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10742                                SOCKET_ID_ANY);
10743         if (!resource) {
10744                 rte_flow_error_set(ctx->error, ENOMEM,
10745                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10746                                    "cannot create matcher");
10747                 return NULL;
10748         }
10749         *resource = *ref;
10750         dv_attr.match_criteria_enable =
10751                 flow_dv_matcher_enable(resource->mask.buf);
10752         __flow_dv_adjust_buf_size(&ref->mask.size,
10753                                   dv_attr.match_criteria_enable);
10754         dv_attr.priority = ref->priority;
10755         if (tbl->is_egress)
10756                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10757         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
10758                                                tbl->tbl.obj,
10759                                                &resource->matcher_object);
10760         if (ret) {
10761                 mlx5_free(resource);
10762                 rte_flow_error_set(ctx->error, ENOMEM,
10763                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10764                                    "cannot create matcher");
10765                 return NULL;
10766         }
10767         return &resource->entry;
10768 }
10769
10770 /**
10771  * Register the flow matcher.
10772  *
10773  * @param[in, out] dev
10774  *   Pointer to rte_eth_dev structure.
10775  * @param[in, out] ref
10776  *   Pointer to flow matcher.
10777  * @param[in, out] key
10778  *   Pointer to flow table key.
10779  * @param[in, out] dev_flow
10780  *   Pointer to the dev_flow.
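 * @param[in] tunnel
 *   Tunnel offload context, NULL for a regular flow.
 * @param[in] group_id
 *   Flow group id, used for the tunnel offload table registration.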
10781  * @param[out] error
10782  *   pointer to error structure.
10783  *
10784  * @return
10785  *   0 on success, otherwise -errno and errno is set.
10786  */
10787 static int
10788 flow_dv_matcher_register(struct rte_eth_dev *dev,
10789                          struct mlx5_flow_dv_matcher *ref,
10790                          union mlx5_flow_tbl_key *key,
10791                          struct mlx5_flow *dev_flow,
10792                          const struct mlx5_flow_tunnel *tunnel,
10793                          uint32_t group_id,
10794                          struct rte_flow_error *error)
10795 {
10796         struct mlx5_list_entry *entry;
10797         struct mlx5_flow_dv_matcher *resource;
10798         struct mlx5_flow_tbl_resource *tbl;
10799         struct mlx5_flow_tbl_data_entry *tbl_data;
10800         struct mlx5_flow_cb_ctx ctx = {
10801                 .error = error,
10802                 .data = ref,
10803         };
10804         /*
10805          * The tunnel offload API requires this registration for cases when
10806          * a tunnel match rule was inserted before the tunnel set rule.
10807          */
10808         tbl = flow_dv_tbl_resource_get(dev, key->level,
10809                                        key->is_egress, key->is_fdb,
10810                                        dev_flow->external, tunnel,
10811                                        group_id, 0, key->id, error);
10812         if (!tbl)
10813                 return -rte_errno;      /* No need to refill the error info */
10814         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10815         ref->tbl = tbl;
10816         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10817         if (!entry) {
10818                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10819                 return rte_flow_error_set(error, ENOMEM,
10820                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10821                                           "cannot allocate ref memory");
10822         }
10823         resource = container_of(entry, typeof(*resource), entry);
10824         dev_flow->handle->dvh.matcher = resource;
10825         return 0;
10826 }
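
/*
 * Registration sketch (editorial illustration): the caller fills only the
 * matching criteria in the reference; the table is resolved internally
 * from the key. The crc is assumed here to be a raw checksum over the
 * mask buffer, which is what the crc-then-memcmp comparison in
 * flow_dv_matcher_match_cb() relies on.
 *
 *	struct mlx5_flow_dv_matcher ref = {
 *		.crc = rte_raw_cksum((const void *)mask.buf, mask.size),
 *		.priority = priority,
 *		.mask = mask,
 *	};
 *
 *	if (flow_dv_matcher_register(dev, &ref, &key, dev_flow, tunnel,
 *				     group_id, error))
 *		return -rte_errno;
 */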
10827
10828 struct mlx5_list_entry *
10829 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10830 {
10831         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10832         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10833         struct mlx5_flow_dv_tag_resource *entry;
10834         uint32_t idx = 0;
10835         int ret;
10836
10837         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10838         if (!entry) {
10839                 rte_flow_error_set(ctx->error, ENOMEM,
10840                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10841                                    "cannot allocate resource memory");
10842                 return NULL;
10843         }
10844         entry->idx = idx;
10845         entry->tag_id = *(uint32_t *)(ctx->data);
10846         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10847                                                   &entry->action);
10848         if (ret) {
10849                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10850                 rte_flow_error_set(ctx->error, ENOMEM,
10851                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10852                                    NULL, "cannot create action");
10853                 return NULL;
10854         }
10855         return &entry->entry;
10856 }
10857
10858 int
10859 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10860                      void *cb_ctx)
10861 {
10862         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10863         struct mlx5_flow_dv_tag_resource *tag =
10864                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10865
10866         return *(uint32_t *)(ctx->data) != tag->tag_id;
10867 }
10868
10869 struct mlx5_list_entry *
10870 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10871                      void *cb_ctx)
10872 {
10873         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10874         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10875         struct mlx5_flow_dv_tag_resource *entry;
10876         uint32_t idx = 0;
10877
10878         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10879         if (!entry) {
10880                 rte_flow_error_set(ctx->error, ENOMEM,
10881                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10882                                    "cannot allocate tag resource memory");
10883                 return NULL;
10884         }
10885         memcpy(entry, oentry, sizeof(*entry));
10886         entry->idx = idx;
10887         return &entry->entry;
10888 }
10889
10890 void
10891 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10892 {
10893         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10894         struct mlx5_flow_dv_tag_resource *tag =
10895                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10896
10897         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10898 }
10899
10900 /**
10901  * Find existing tag resource or create and register a new one.
10902  *
10903  * @param[in, out] dev
10904  *   Pointer to rte_eth_dev structure.
10905  * @param[in] tag_be24
10906  *   Tag value in big endian, right-shifted by 8 bits.
10907  * @param[in, out] dev_flow
10908  *   Pointer to the dev_flow.
10909  * @param[out] error
10910  *   pointer to error structure.
10911  *
10912  * @return
10913  *   0 on success, otherwise -errno and errno is set.
10914  */
10915 static int
10916 flow_dv_tag_resource_register
10917                         (struct rte_eth_dev *dev,
10918                          uint32_t tag_be24,
10919                          struct mlx5_flow *dev_flow,
10920                          struct rte_flow_error *error)
10921 {
10922         struct mlx5_priv *priv = dev->data->dev_private;
10923         struct mlx5_flow_dv_tag_resource *resource;
10924         struct mlx5_list_entry *entry;
10925         struct mlx5_flow_cb_ctx ctx = {
10926                                         .error = error,
10927                                         .data = &tag_be24,
10928                                         };
10929         struct mlx5_hlist *tag_table;
10930
10931         tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
10932                                       "tags",
10933                                       MLX5_TAGS_HLIST_ARRAY_SIZE,
10934                                       false, false, priv->sh,
10935                                       flow_dv_tag_create_cb,
10936                                       flow_dv_tag_match_cb,
10937                                       flow_dv_tag_remove_cb,
10938                                       flow_dv_tag_clone_cb,
10939                                       flow_dv_tag_clone_free_cb,
10940                                       error);
10941         if (unlikely(!tag_table))
10942                 return -rte_errno;
10943         entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
10944         if (entry) {
10945                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10946                                         entry);
10947                 dev_flow->handle->dvh.rix_tag = resource->idx;
10948                 dev_flow->dv.tag_resource = resource;
10949                 return 0;
10950         }
10951         return -rte_errno;
10952 }
10953
10954 void
10955 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10956 {
10957         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10958         struct mlx5_flow_dv_tag_resource *tag =
10959                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10960
10961         MLX5_ASSERT(tag && sh && tag->action);
10962         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10963         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10964         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10965 }
10966
10967 /**
10968  * Release the tag.
10969  *
10970  * @param dev
10971  *   Pointer to Ethernet device.
10972  * @param tag_idx
10973  *   Tag index.
10974  *
10975  * @return
10976  *   1 while a reference on it exists, 0 when freed.
10977  */
10978 static int
10979 flow_dv_tag_release(struct rte_eth_dev *dev,
10980                     uint32_t tag_idx)
10981 {
10982         struct mlx5_priv *priv = dev->data->dev_private;
10983         struct mlx5_flow_dv_tag_resource *tag;
10984
10985         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10986         if (!tag)
10987                 return 0;
10988         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10989                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10990         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10991 }
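
/*
 * Pairing sketch (editorial illustration): a tag registered during flow
 * translation is released through the index that
 * flow_dv_tag_resource_register() stored in the flow handle.
 *
 *	if (flow_dv_tag_resource_register(dev, tag_be24, dev_flow, error))
 *		return -rte_errno;
 *	...
 *	flow_dv_tag_release(dev, dev_flow->handle->dvh.rix_tag);
 */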
10992
10993 /**
10994  * Translate action PORT_ID / REPRESENTED_PORT to vport.
10995  *
10996  * @param[in] dev
10997  *   Pointer to rte_eth_dev structure.
10998  * @param[in] action
10999  *   Pointer to action PORT_ID / REPRESENTED_PORT.
11000  * @param[out] dst_port_id
11001  *   The target port ID.
11002  * @param[out] error
11003  *   Pointer to the error structure.
11004  *
11005  * @return
11006  *   0 on success, a negative errno value otherwise and rte_errno is set.
11007  */
11008 static int
11009 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
11010                                  const struct rte_flow_action *action,
11011                                  uint32_t *dst_port_id,
11012                                  struct rte_flow_error *error)
11013 {
11014         uint32_t port;
11015         struct mlx5_priv *priv;
11016
11017         switch (action->type) {
11018         case RTE_FLOW_ACTION_TYPE_PORT_ID: {
11019                 const struct rte_flow_action_port_id *conf;
11020
11021                 conf = (const struct rte_flow_action_port_id *)action->conf;
11022                 port = conf->original ? dev->data->port_id : conf->id;
11023                 break;
11024         }
11025         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
11026                 const struct rte_flow_action_ethdev *ethdev;
11027
11028                 ethdev = (const struct rte_flow_action_ethdev *)action->conf;
11029                 port = ethdev->port_id;
11030                 break;
11031         }
11032         default:
11033                 MLX5_ASSERT(false);
11034                 return rte_flow_error_set(error, EINVAL,
11035                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
11036                                           "unknown E-Switch action");
11037         }
11038
11039         priv = mlx5_port_to_eswitch_info(port, false);
11040         if (!priv)
11041                 return rte_flow_error_set(error, -rte_errno,
11042                                           RTE_FLOW_ERROR_TYPE_ACTION,
11043                                           NULL,
11044                                           "No eswitch info was found for port");
11045 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
11046         /*
11047          * This parameter is transferred to
11048          * mlx5dv_dr_action_create_dest_ib_port().
11049          */
11050         *dst_port_id = priv->dev_port;
11051 #else
11052         /*
11053          * Legacy mode, no LAG configuration is supported.
11054          * This parameter is transferred to
11055          * mlx5dv_dr_action_create_dest_vport().
11056          */
11057         *dst_port_id = priv->vport_id;
11058 #endif
11059         return 0;
11060 }
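
/*
 * Configuration sketch (editorial illustration): the two action types
 * accepted above carry their target in different public structures; both
 * resolve to the same vport translation.
 *
 *	struct rte_flow_action_port_id pid = { .id = 1 };
 *	struct rte_flow_action_ethdev ethdev = { .port_id = 1 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
 *		  .conf = &ethdev },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */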
11061
11062 /**
11063  * Create a counter with aging configuration.
11064  *
11065  * @param[in] dev
11066  *   Pointer to rte_eth_dev structure.
11067  * @param[in] dev_flow
11068  *   Pointer to the mlx5_flow.
11069  * @param[out] count
11070  *   Pointer to the counter action configuration.
11071  * @param[in] age
11072  *   Pointer to the aging action configuration.
11073  *
11074  * @return
11075  *   Index to flow counter on success, 0 otherwise.
11076  */
11077 static uint32_t
11078 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
11079                                 struct mlx5_flow *dev_flow,
11080                                 const struct rte_flow_action_count *count
11081                                         __rte_unused,
11082                                 const struct rte_flow_action_age *age)
11083 {
11084         uint32_t counter;
11085         struct mlx5_age_param *age_param;
11086
11087         counter = flow_dv_counter_alloc(dev, !!age);
11088         if (!counter || age == NULL)
11089                 return counter;
11090         age_param = flow_dv_counter_idx_get_age(dev, counter);
11091         age_param->context = age->context ? age->context :
11092                 (void *)(uintptr_t)(dev_flow->flow_idx);
11093         age_param->timeout = age->timeout;
11094         age_param->port_id = dev->data->port_id;
11095         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
11096         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
11097         return counter;
11098 }
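
/*
 * Aging sketch (editorial illustration): when an AGE action accompanies
 * COUNT, the counter allocated above doubles as the aging object. With a
 * NULL context the aging callback later receives the flow index instead.
 * The timeout below is in seconds, matching sec_since_last_hit.
 *
 *	struct rte_flow_action_age age = { .timeout = 10, .context = NULL };
 *	uint32_t cnt;
 *
 *	cnt = flow_dv_translate_create_counter(dev, dev_flow, NULL, &age);
 *	if (!cnt)
 *		goto error;
 */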
11099
11100 /**
11101  * Add Tx queue matcher.
11102  *
11103  * @param[in] dev
11104  *   Pointer to the dev struct.
11105  * @param[in, out] matcher
11106  *   Flow matcher.
11107  * @param[in, out] key
11108  *   Flow matcher value.
11109  * @param[in] item
11110  *   Flow pattern to translate.
11113  */
11114 static void
11115 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
11116                                 void *matcher, void *key,
11117                                 const struct rte_flow_item *item)
11118 {
11119         const struct mlx5_rte_flow_item_tx_queue *queue_m;
11120         const struct mlx5_rte_flow_item_tx_queue *queue_v;
11121         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
11122         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
11123         struct mlx5_txq_ctrl *txq;
11124         uint32_t queue, mask;
11125
11126         queue_m = (const void *)item->mask;
11127         queue_v = (const void *)item->spec;
11128         if (!queue_v)
11129                 return;
11130         txq = mlx5_txq_get(dev, queue_v->queue);
11131         if (!txq)
11132                 return;
11133         if (txq->is_hairpin)
11134                 queue = txq->obj->sq->id;
11135         else
11136                 queue = txq->obj->sq_obj.sq->id;
11137         mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
11138         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
11139         MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
11140         mlx5_txq_release(dev, queue_v->queue);
11141 }
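
/*
 * Matching sketch (editorial illustration): the PMD-internal Tx queue item
 * carries the ethdev Tx queue index; the translation above swaps it for
 * the underlying SQ number before writing source_sqn.
 *
 *	struct mlx5_rte_flow_item_tx_queue spec = { .queue = tx_queue_id };
 *	struct rte_flow_item item = {
 *		.type = (enum rte_flow_item_type)
 *			MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
 *		.spec = &spec,
 *	};
 */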
11142
11143 /**
11144  * Set the hash fields according to the @p flow information.
11145  *
11146  * @param[in] item_flags
11147  *   The match pattern item flags.
11148  * @param[in] rss_desc
11149  *   Pointer to the mlx5_flow_rss_desc.
11150  * @param[out] hash_fields
11151  *   Pointer to the RSS hash fields.
11152  */
11153 void
11154 flow_dv_hashfields_set(uint64_t item_flags,
11155                        struct mlx5_flow_rss_desc *rss_desc,
11156                        uint64_t *hash_fields)
11157 {
11158         uint64_t items = item_flags;
11159         uint64_t fields = 0;
11160         int rss_inner = 0;
11161         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
11162
11163         *hash_fields = 0;
11164 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
11165         if (rss_desc->level >= 2)
11166                 rss_inner = 1;
11167 #endif
11168         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
11169             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4)) ||
11170              !items) {
11171                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
11172                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
11173                                 fields |= IBV_RX_HASH_SRC_IPV4;
11174                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
11175                                 fields |= IBV_RX_HASH_DST_IPV4;
11176                         else
11177                                 fields |= MLX5_IPV4_IBV_RX_HASH;
11178                 }
11179         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
11180                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) ||
11181                    !items) {
11182                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
11183                         if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
11184                                 fields |= IBV_RX_HASH_SRC_IPV6;
11185                         else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
11186                                 fields |= IBV_RX_HASH_DST_IPV6;
11187                         else
11188                                 fields |= MLX5_IPV6_IBV_RX_HASH;
11189                 }
11190         }
11191         if (fields == 0)
11192                 /*
11193                  * There is no match between the RSS types and the
11194                  * L3 protocol (IPv4/IPv6) defined in the flow rule.
11195                  */
11196                 return;
11197         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
11198             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) ||
11199             !items) {
11200                 if (rss_types & RTE_ETH_RSS_UDP) {
11201                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11202                                 fields |= IBV_RX_HASH_SRC_PORT_UDP;
11203                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11204                                 fields |= IBV_RX_HASH_DST_PORT_UDP;
11205                         else
11206                                 fields |= MLX5_UDP_IBV_RX_HASH;
11207                 }
11208         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
11209                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP)) ||
11210                    !items) {
11211                 if (rss_types & RTE_ETH_RSS_TCP) {
11212                         if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
11213                                 fields |= IBV_RX_HASH_SRC_PORT_TCP;
11214                         else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
11215                                 fields |= IBV_RX_HASH_DST_PORT_TCP;
11216                         else
11217                                 fields |= MLX5_TCP_IBV_RX_HASH;
11218                 }
11219         }
11220         if (rss_inner)
11221                 fields |= IBV_RX_HASH_INNER;
11222         *hash_fields = fields;
11223 }
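
/*
 * Worked example (editorial illustration): for an outer IPv4/UDP pattern
 * with rss_desc->types = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP and an RSS
 * level below 2, none of the *_SRC_ONLY/*_DST_ONLY bits are set, so the
 * symmetric field sets are chosen:
 *
 *	uint64_t fields;
 *
 *	flow_dv_hashfields_set(MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 *			       MLX5_FLOW_LAYER_OUTER_L4_UDP,
 *			       rss_desc, &fields);
 *
 * which yields MLX5_IPV4_IBV_RX_HASH | MLX5_UDP_IBV_RX_HASH with
 * IBV_RX_HASH_INNER left clear.
 */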
11224
11225 /**
11226  * Prepare an Rx Hash queue.
11227  *
11228  * @param dev
11229  *   Pointer to Ethernet device.
11230  * @param[in] dev_flow
11231  *   Pointer to the mlx5_flow.
11232  * @param[in] rss_desc
11233  *   Pointer to the mlx5_flow_rss_desc.
11234  * @param[out] hrxq_idx
11235  *   Hash Rx queue index.
11236  *
11237  * @return
11238  *   The initialised Verbs/DevX object, NULL otherwise and rte_errno is set.
11239  */
11240 static struct mlx5_hrxq *
11241 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
11242                      struct mlx5_flow *dev_flow,
11243                      struct mlx5_flow_rss_desc *rss_desc,
11244                      uint32_t *hrxq_idx)
11245 {
11246         struct mlx5_flow_handle *dh = dev_flow->handle;
11247         uint32_t shared_rss = rss_desc->shared_rss;
11248         struct mlx5_hrxq *hrxq;
11249
11250         MLX5_ASSERT(rss_desc->queue_num);
11251         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
11252         rss_desc->hash_fields = dev_flow->hash_fields;
11253         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
11254         rss_desc->shared_rss = 0;
11255         if (rss_desc->hash_fields == 0)
11256                 rss_desc->queue_num = 1;
11257         hrxq = mlx5_hrxq_get(dev, rss_desc);
11258         *hrxq_idx = hrxq ? hrxq->idx : 0;
11259         rss_desc->shared_rss = shared_rss;
11260         return hrxq;
11261 }
11262
11263 /**
11264  * Release sample sub action resource.
11265  *
11266  * @param[in, out] dev
11267  *   Pointer to rte_eth_dev structure.
11268  * @param[in] act_res
11269  *   Pointer to sample sub action resource.
11270  */
11271 static void
11272 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
11273                                    struct mlx5_flow_sub_actions_idx *act_res)
11274 {
11275         if (act_res->rix_hrxq) {
11276                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
11277                 act_res->rix_hrxq = 0;
11278         }
11279         if (act_res->rix_encap_decap) {
11280                 flow_dv_encap_decap_resource_release(dev,
11281                                                      act_res->rix_encap_decap);
11282                 act_res->rix_encap_decap = 0;
11283         }
11284         if (act_res->rix_port_id_action) {
11285                 flow_dv_port_id_action_resource_release(dev,
11286                                                 act_res->rix_port_id_action);
11287                 act_res->rix_port_id_action = 0;
11288         }
11289         if (act_res->rix_tag) {
11290                 flow_dv_tag_release(dev, act_res->rix_tag);
11291                 act_res->rix_tag = 0;
11292         }
11293         if (act_res->rix_jump) {
11294                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
11295                 act_res->rix_jump = 0;
11296         }
11297 }
11298
11299 int
11300 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11301                         struct mlx5_list_entry *entry, void *cb_ctx)
11302 {
11303         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11304         struct rte_eth_dev *dev = ctx->dev;
11305         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11306         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11307                                                               typeof(*resource),
11308                                                               entry);
11309
11310         if (ctx_resource->ratio == resource->ratio &&
11311             ctx_resource->ft_type == resource->ft_type &&
11312             ctx_resource->ft_id == resource->ft_id &&
11313             ctx_resource->set_action == resource->set_action &&
11314             !memcmp((void *)&ctx_resource->sample_act,
11315                     (void *)&resource->sample_act,
11316                     sizeof(struct mlx5_flow_sub_actions_list))) {
11317                 /*
11318                  * Existing sample action should release the prepared
11319                  * sub-actions reference counter.
11320                  */
11321                 flow_dv_sample_sub_actions_release(dev,
11322                                                    &ctx_resource->sample_idx);
11323                 return 0;
11324         }
11325         return 1;
11326 }
11327
11328 struct mlx5_list_entry *
11329 flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11330 {
11331         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11332         struct rte_eth_dev *dev = ctx->dev;
11333         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11334         void **sample_dv_actions = ctx_resource->sub_actions;
11335         struct mlx5_flow_dv_sample_resource *resource;
11336         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
11337         struct mlx5_priv *priv = dev->data->dev_private;
11338         struct mlx5_dev_ctx_shared *sh = priv->sh;
11339         struct mlx5_flow_tbl_resource *tbl;
11340         uint32_t idx = 0;
11341         const uint32_t next_ft_step = 1;
11342         uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
11343         uint8_t is_egress = 0;
11344         uint8_t is_transfer = 0;
11345         struct rte_flow_error *error = ctx->error;
11346
11347         /* Register new sample resource. */
11348         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11349         if (!resource) {
11350                 rte_flow_error_set(error, ENOMEM,
11351                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11352                                           NULL,
11353                                           "cannot allocate resource memory");
11354                 return NULL;
11355         }
11356         *resource = *ctx_resource;
11357         /* Create normal path table level */
11358         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11359                 is_transfer = 1;
11360         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
11361                 is_egress = 1;
11362         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
11363                                         is_egress, is_transfer,
11364                                         true, NULL, 0, 0, 0, error);
11365         if (!tbl) {
11366                 rte_flow_error_set(error, ENOMEM,
11367                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11368                                           NULL,
11369                                           "fail to create normal path table "
11370                                           "for sample");
11371                 goto error;
11372         }
11373         resource->normal_path_tbl = tbl;
11374         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11375                 if (!sh->default_miss_action) {
11376                         rte_flow_error_set(error, ENOMEM,
11377                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11378                                                 NULL,
11379                                                 "default miss action was not "
11380                                                 "created");
11381                         goto error;
11382                 }
11383                 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
11384                                                 sh->default_miss_action;
11385         }
11386         /* Create a DR sample action */
11387         sampler_attr.sample_ratio = resource->ratio;
11388         sampler_attr.default_next_table = tbl->obj;
11389         sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
11390         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
11391                                                         &sample_dv_actions[0];
11392         sampler_attr.action = resource->set_action;
11393         if (mlx5_os_flow_dr_create_flow_action_sampler
11394                         (&sampler_attr, &resource->verbs_action)) {
11395                 rte_flow_error_set(error, ENOMEM,
11396                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11397                                         NULL, "cannot create sample action");
11398                 goto error;
11399         }
11400         resource->idx = idx;
11401         resource->dev = dev;
11402         return &resource->entry;
11403 error:
11404         if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
11405                 flow_dv_sample_sub_actions_release(dev,
11406                                                    &resource->sample_idx);
11407         if (resource->normal_path_tbl)
11408                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11409                                 resource->normal_path_tbl);
11410         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
11411         return NULL;
11413 }
11414
11415 struct mlx5_list_entry *
11416 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11417                          struct mlx5_list_entry *entry __rte_unused,
11418                          void *cb_ctx)
11419 {
11420         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11421         struct rte_eth_dev *dev = ctx->dev;
11422         struct mlx5_flow_dv_sample_resource *resource;
11423         struct mlx5_priv *priv = dev->data->dev_private;
11424         struct mlx5_dev_ctx_shared *sh = priv->sh;
11425         uint32_t idx = 0;
11426
11427         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11428         if (!resource) {
11429                 rte_flow_error_set(ctx->error, ENOMEM,
11430                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11431                                           NULL,
11432                                           "cannot allocate resource memory");
11433                 return NULL;
11434         }
11435         memcpy(resource, entry, sizeof(*resource));
11436         resource->idx = idx;
11437         resource->dev = dev;
11438         return &resource->entry;
11439 }
11440
11441 void
11442 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11443                              struct mlx5_list_entry *entry)
11444 {
11445         struct mlx5_flow_dv_sample_resource *resource =
11446                                   container_of(entry, typeof(*resource), entry);
11447         struct rte_eth_dev *dev = resource->dev;
11448         struct mlx5_priv *priv = dev->data->dev_private;
11449
11450         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11451 }
11452
11453 /**
11454  * Find existing sample resource or create and register a new one.
11455  *
11456  * @param[in, out] dev
11457  *   Pointer to rte_eth_dev structure.
11458  * @param[in] ref
11459  *   Pointer to sample resource reference.
11460  * @param[in, out] dev_flow
11461  *   Pointer to the dev_flow.
11462  * @param[out] error
11463  *   pointer to error structure.
11464  *
11465  * @return
11466  *   0 on success, otherwise -errno and errno is set.
11467  */
11468 static int
11469 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11470                          struct mlx5_flow_dv_sample_resource *ref,
11471                          struct mlx5_flow *dev_flow,
11472                          struct rte_flow_error *error)
11473 {
11474         struct mlx5_flow_dv_sample_resource *resource;
11475         struct mlx5_list_entry *entry;
11476         struct mlx5_priv *priv = dev->data->dev_private;
11477         struct mlx5_flow_cb_ctx ctx = {
11478                 .dev = dev,
11479                 .error = error,
11480                 .data = ref,
11481         };
11482
11483         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11484         if (!entry)
11485                 return -rte_errno;
11486         resource = container_of(entry, typeof(*resource), entry);
11487         dev_flow->handle->dvh.rix_sample = resource->idx;
11488         dev_flow->dv.sample_res = resource;
11489         return 0;
11490 }
11491
11492 int
11493 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11494                             struct mlx5_list_entry *entry, void *cb_ctx)
11495 {
11496         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11497         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11498         struct rte_eth_dev *dev = ctx->dev;
11499         struct mlx5_flow_dv_dest_array_resource *resource =
11500                                   container_of(entry, typeof(*resource), entry);
11501         uint32_t idx = 0;
11502
11503         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11504             ctx_resource->ft_type == resource->ft_type &&
11505             !memcmp((void *)resource->sample_act,
11506                     (void *)ctx_resource->sample_act,
11507                    (ctx_resource->num_of_dest *
11508                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11509                 /*
11510                  * Existing sample action should release the prepared
11511                  * sub-actions reference counter.
11512                  */
11513                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11514                         flow_dv_sample_sub_actions_release(dev,
11515                                         &ctx_resource->sample_idx[idx]);
11516                 return 0;
11517         }
11518         return 1;
11519 }
11520
11521 struct mlx5_list_entry *
11522 flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
11523 {
11524         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11525         struct rte_eth_dev *dev = ctx->dev;
11526         struct mlx5_flow_dv_dest_array_resource *resource;
11527         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11528         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11529         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11530         struct mlx5_priv *priv = dev->data->dev_private;
11531         struct mlx5_dev_ctx_shared *sh = priv->sh;
11532         struct mlx5_flow_sub_actions_list *sample_act;
11533         struct mlx5dv_dr_domain *domain;
11534         uint32_t idx = 0, res_idx = 0;
11535         struct rte_flow_error *error = ctx->error;
11536         uint64_t action_flags;
11537         int ret;
11538
11539         /* Register new destination array resource. */
11540         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11541                                             &res_idx);
11542         if (!resource) {
11543                 rte_flow_error_set(error, ENOMEM,
11544                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11545                                           NULL,
11546                                           "cannot allocate resource memory");
11547                 return NULL;
11548         }
11549         *resource = *ctx_resource;
11550         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11551                 domain = sh->fdb_domain;
11552         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11553                 domain = sh->rx_domain;
11554         else
11555                 domain = sh->tx_domain;
11556         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11557                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11558                                  mlx5_malloc(MLX5_MEM_ZERO,
11559                                  sizeof(struct mlx5dv_dr_action_dest_attr),
11560                                  0, SOCKET_ID_ANY);
11561                 if (!dest_attr[idx]) {
11562                         rte_flow_error_set(error, ENOMEM,
11563                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11564                                            NULL,
11565                                            "cannot allocate resource memory");
11566                         goto error;
11567                 }
11568                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11569                 sample_act = &ctx_resource->sample_act[idx];
11570                 action_flags = sample_act->action_flags;
11571                 switch (action_flags) {
11572                 case MLX5_FLOW_ACTION_QUEUE:
11573                         dest_attr[idx]->dest = sample_act->dr_queue_action;
11574                         break;
11575                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11576                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11577                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11578                         dest_attr[idx]->dest_reformat->reformat =
11579                                         sample_act->dr_encap_action;
11580                         dest_attr[idx]->dest_reformat->dest =
11581                                         sample_act->dr_port_id_action;
11582                         break;
11583                 case MLX5_FLOW_ACTION_PORT_ID:
11584                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
11585                         break;
11586                 case MLX5_FLOW_ACTION_JUMP:
11587                         dest_attr[idx]->dest = sample_act->dr_jump_action;
11588                         break;
11589                 default:
11590                         rte_flow_error_set(error, EINVAL,
11591                                            RTE_FLOW_ERROR_TYPE_ACTION,
11592                                            NULL,
11593                                            "unsupported actions type");
11594                         goto error;
11595                 }
11596         }
11597         /* create a dest array action */
11598         ret = mlx5_os_flow_dr_create_flow_action_dest_array
11599                                                 (domain,
11600                                                  resource->num_of_dest,
11601                                                  dest_attr,
11602                                                  &resource->action);
11603         if (ret) {
11604                 rte_flow_error_set(error, ENOMEM,
11605                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11606                                    NULL,
11607                                    "cannot create destination array action");
11608                 goto error;
11609         }
11610         resource->idx = res_idx;
11611         resource->dev = dev;
11612         for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11613                 mlx5_free(dest_attr[idx]);
11614         return &resource->entry;
11615 error:
11616         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11617                 flow_dv_sample_sub_actions_release(dev,
11618                                                    &resource->sample_idx[idx]);
11619                 if (dest_attr[idx])
11620                         mlx5_free(dest_attr[idx]);
11621         }
11622         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11623         return NULL;
11624 }
11625
11626 struct mlx5_list_entry *
11627 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11628                             struct mlx5_list_entry *entry __rte_unused,
11629                             void *cb_ctx)
11630 {
11631         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11632         struct rte_eth_dev *dev = ctx->dev;
11633         struct mlx5_flow_dv_dest_array_resource *resource;
11634         struct mlx5_priv *priv = dev->data->dev_private;
11635         struct mlx5_dev_ctx_shared *sh = priv->sh;
11636         uint32_t res_idx = 0;
11637         struct rte_flow_error *error = ctx->error;
11638
11639         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11640                                       &res_idx);
11641         if (!resource) {
11642                 rte_flow_error_set(error, ENOMEM,
11643                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11644                                           NULL,
11645                                           "cannot allocate dest-array memory");
11646                 return NULL;
11647         }
11648         memcpy(resource, entry, sizeof(*resource));
11649         resource->idx = res_idx;
11650         resource->dev = dev;
11651         return &resource->entry;
11652 }
11653
11654 void
11655 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11656                                  struct mlx5_list_entry *entry)
11657 {
11658         struct mlx5_flow_dv_dest_array_resource *resource =
11659                         container_of(entry, typeof(*resource), entry);
11660         struct rte_eth_dev *dev = resource->dev;
11661         struct mlx5_priv *priv = dev->data->dev_private;
11662
11663         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11664 }
11665
11666 /**
11667  * Find existing destination array resource or create and register a new one.
11668  *
11669  * @param[in, out] dev
11670  *   Pointer to rte_eth_dev structure.
11671  * @param[in] ref
11672  *   Pointer to destination array resource reference.
11673  * @param[in, out] dev_flow
11674  *   Pointer to the dev_flow.
11675  * @param[out] error
11676  *   pointer to error structure.
11677  *
11678  * @return
11679  *   0 on success, otherwise -errno and errno is set.
11680  */
11681 static int
11682 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11683                          struct mlx5_flow_dv_dest_array_resource *ref,
11684                          struct mlx5_flow *dev_flow,
11685                          struct rte_flow_error *error)
11686 {
11687         struct mlx5_flow_dv_dest_array_resource *resource;
11688         struct mlx5_priv *priv = dev->data->dev_private;
11689         struct mlx5_list_entry *entry;
11690         struct mlx5_flow_cb_ctx ctx = {
11691                 .dev = dev,
11692                 .error = error,
11693                 .data = ref,
11694         };
11695
11696         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11697         if (!entry)
11698                 return -rte_errno;
11699         resource = container_of(entry, typeof(*resource), entry);
11700         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11701         dev_flow->dv.dest_array_res = resource;
11702         return 0;
11703 }
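
/*
 * Registration sketch (editorial illustration): like the sample resource
 * above, the destination array is deduplicated by content; on a match the
 * callback releases the prepared sub-action references, so the caller
 * never frees them twice. Only the discriminating fields are shown here.
 *
 *	struct mlx5_flow_dv_dest_array_resource ref = {
 *		.num_of_dest = num_of_dest,
 *		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
 *	};
 *
 *	if (flow_dv_dest_array_resource_register(dev, &ref, dev_flow, error))
 *		return -rte_errno;
 */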
11704
11705 /**
11706  * Convert Sample action to DV specification.
11707  *
11708  * @param[in] dev
11709  *   Pointer to rte_eth_dev structure.
11710  * @param[in] action
11711  *   Pointer to sample action structure.
11712  * @param[in, out] dev_flow
11713  *   Pointer to the mlx5_flow.
11714  * @param[in] attr
11715  *   Pointer to the flow attributes.
11716  * @param[in, out] num_of_dest
11717  *   Pointer to the num of destination.
11718  * @param[in, out] sample_actions
11719  *   Pointer to sample actions list.
11720  * @param[in, out] res
11721  *   Pointer to sample resource.
11722  * @param[out] error
11723  *   Pointer to the error structure.
11724  *
11725  * @return
11726  *   0 on success, a negative errno value otherwise and rte_errno is set.
11727  */
11728 static int
11729 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11730                                 const struct rte_flow_action_sample *action,
11731                                 struct mlx5_flow *dev_flow,
11732                                 const struct rte_flow_attr *attr,
11733                                 uint32_t *num_of_dest,
11734                                 void **sample_actions,
11735                                 struct mlx5_flow_dv_sample_resource *res,
11736                                 struct rte_flow_error *error)
11737 {
11738         struct mlx5_priv *priv = dev->data->dev_private;
11739         const struct rte_flow_action *sub_actions;
11740         struct mlx5_flow_sub_actions_list *sample_act;
11741         struct mlx5_flow_sub_actions_idx *sample_idx;
11742         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11743         struct rte_flow *flow = dev_flow->flow;
11744         struct mlx5_flow_rss_desc *rss_desc;
11745         uint64_t action_flags = 0;
11746
11747         MLX5_ASSERT(wks);
11748         rss_desc = &wks->rss_desc;
11749         sample_act = &res->sample_act;
11750         sample_idx = &res->sample_idx;
11751         res->ratio = action->ratio;
11752         sub_actions = action->actions;
11753         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11754                 int type = sub_actions->type;
11755                 uint32_t pre_rix = 0;
11756                 void *pre_r;
11757                 switch (type) {
11758                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11759                 {
11760                         const struct rte_flow_action_queue *queue;
11761                         struct mlx5_hrxq *hrxq;
11762                         uint32_t hrxq_idx;
11763
11764                         queue = sub_actions->conf;
11765                         rss_desc->queue_num = 1;
11766                         rss_desc->queue[0] = queue->index;
11767                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11768                                                     rss_desc, &hrxq_idx);
11769                         if (!hrxq)
11770                                 return rte_flow_error_set
11771                                         (error, rte_errno,
11772                                          RTE_FLOW_ERROR_TYPE_ACTION,
11773                                          NULL,
11774                                          "cannot create fate queue");
11775                         sample_act->dr_queue_action = hrxq->action;
11776                         sample_idx->rix_hrxq = hrxq_idx;
11777                         sample_actions[sample_act->actions_num++] =
11778                                                 hrxq->action;
11779                         (*num_of_dest)++;
11780                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11781                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11782                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11783                         dev_flow->handle->fate_action =
11784                                         MLX5_FLOW_FATE_QUEUE;
11785                         break;
11786                 }
11787                 case RTE_FLOW_ACTION_TYPE_RSS:
11788                 {
11789                         struct mlx5_hrxq *hrxq;
11790                         uint32_t hrxq_idx;
11791                         const struct rte_flow_action_rss *rss;
11792                         const uint8_t *rss_key;
11793
11794                         rss = sub_actions->conf;
11795                         memcpy(rss_desc->queue, rss->queue,
11796                                rss->queue_num * sizeof(uint16_t));
11797                         rss_desc->queue_num = rss->queue_num;
11798                         /* NULL RSS key indicates default RSS key. */
11799                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11800                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11801                         /*
11802                          * rss->level and rss->types should be set in advance
11803                          * when expanding items for RSS.
11804                          */
11805                         flow_dv_hashfields_set(dev_flow->handle->layers,
11806                                                rss_desc,
11807                                                &dev_flow->hash_fields);
11808                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11809                                                     rss_desc, &hrxq_idx);
11810                         if (!hrxq)
11811                                 return rte_flow_error_set
11812                                         (error, rte_errno,
11813                                          RTE_FLOW_ERROR_TYPE_ACTION,
11814                                          NULL,
11815                                          "cannot create fate queue");
11816                         sample_act->dr_queue_action = hrxq->action;
11817                         sample_idx->rix_hrxq = hrxq_idx;
11818                         sample_actions[sample_act->actions_num++] =
11819                                                 hrxq->action;
11820                         (*num_of_dest)++;
11821                         action_flags |= MLX5_FLOW_ACTION_RSS;
11822                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11823                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11824                         dev_flow->handle->fate_action =
11825                                         MLX5_FLOW_FATE_QUEUE;
11826                         break;
11827                 }
11828                 case RTE_FLOW_ACTION_TYPE_MARK:
11829                 {
11830                         uint32_t tag_be = mlx5_flow_mark_set
11831                                 (((const struct rte_flow_action_mark *)
11832                                 (sub_actions->conf))->id);
11833
11834                         wks->mark = 1;
11835                         pre_rix = dev_flow->handle->dvh.rix_tag;
11836                         /* Save the mark resource before sample */
11837                         pre_r = dev_flow->dv.tag_resource;
11838                         if (flow_dv_tag_resource_register(dev, tag_be,
11839                                                   dev_flow, error))
11840                                 return -rte_errno;
11841                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11842                         sample_act->dr_tag_action =
11843                                 dev_flow->dv.tag_resource->action;
11844                         sample_idx->rix_tag =
11845                                 dev_flow->handle->dvh.rix_tag;
11846                         sample_actions[sample_act->actions_num++] =
11847                                                 sample_act->dr_tag_action;
11848                         /* Recover the mark resource after sample */
11849                         dev_flow->dv.tag_resource = pre_r;
11850                         dev_flow->handle->dvh.rix_tag = pre_rix;
11851                         action_flags |= MLX5_FLOW_ACTION_MARK;
11852                         break;
11853                 }
11854                 case RTE_FLOW_ACTION_TYPE_COUNT:
11855                 {
11856                         if (!flow->counter) {
11857                                 flow->counter =
11858                                         flow_dv_translate_create_counter(dev,
11859                                                 dev_flow, sub_actions->conf,
11860                                                 0);
11861                                 if (!flow->counter)
11862                                         return rte_flow_error_set
11863                                                 (error, rte_errno,
11864                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11865                                                 NULL,
11866                                                 "cannot create counter"
11867                                                 " object.");
11868                         }
11869                         sample_act->dr_cnt_action =
11870                                   (flow_dv_counter_get_by_idx(dev,
11871                                   flow->counter, NULL))->action;
11872                         sample_actions[sample_act->actions_num++] =
11873                                                 sample_act->dr_cnt_action;
11874                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11875                         break;
11876                 }
11877                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11878                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11879                 {
11880                         struct mlx5_flow_dv_port_id_action_resource
11881                                         port_id_resource;
11882                         uint32_t port_id = 0;
11883
11884                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11885                         /* Save the port id resource before sample */
11886                         pre_rix = dev_flow->handle->rix_port_id_action;
11887                         pre_r = dev_flow->dv.port_id_action;
11888                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11889                                                              &port_id, error))
11890                                 return -rte_errno;
11891                         port_id_resource.port_id = port_id;
11892                         if (flow_dv_port_id_action_resource_register
11893                             (dev, &port_id_resource, dev_flow, error))
11894                                 return -rte_errno;
11895                         sample_act->dr_port_id_action =
11896                                 dev_flow->dv.port_id_action->action;
11897                         sample_idx->rix_port_id_action =
11898                                 dev_flow->handle->rix_port_id_action;
11899                         sample_actions[sample_act->actions_num++] =
11900                                                 sample_act->dr_port_id_action;
11901                         /* Recover the port id resource after sample */
11902                         dev_flow->dv.port_id_action = pre_r;
11903                         dev_flow->handle->rix_port_id_action = pre_rix;
11904                         (*num_of_dest)++;
11905                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11906                         break;
11907                 }
11908                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11909                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11910                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11911                         /* Save the encap resource before sample */
11912                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11913                         pre_r = dev_flow->dv.encap_decap;
11914                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11915                                                            dev_flow,
11916                                                            attr->transfer,
11917                                                            error))
11918                                 return -rte_errno;
11919                         sample_act->dr_encap_action =
11920                                 dev_flow->dv.encap_decap->action;
11921                         sample_idx->rix_encap_decap =
11922                                 dev_flow->handle->dvh.rix_encap_decap;
11923                         sample_actions[sample_act->actions_num++] =
11924                                                 sample_act->dr_encap_action;
11925                         /* Recover the encap resource after sample */
11926                         dev_flow->dv.encap_decap = pre_r;
11927                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11928                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11929                         break;
11930                 default:
11931                         return rte_flow_error_set(error, EINVAL,
11932                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11933                                 NULL,
11934                                 "unsupported sub-action for sampler");
11935                 }
11936         }
11937         sample_act->action_flags = action_flags;
11938         res->ft_id = dev_flow->dv.group;
11939         if (attr->transfer) {
11940                 union {
11941                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11942                         uint64_t set_action;
11943                 } action_ctx = { .set_action = 0 };
11944
11945                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11946                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11947                          MLX5_MODIFICATION_TYPE_SET);
11948                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11949                          MLX5_MODI_META_REG_C_0);
11950                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11951                          priv->vport_meta_tag);
11952                 res->set_action = action_ctx.set_action;
11953         } else if (attr->ingress) {
11954                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11955         } else {
11956                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11957         }
11958         return 0;
11959 }
11960
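/*
 * Editor's sketch (not part of the driver): the kind of sample action the
 * translation above consumes, built with the public rte_flow API. The
 * ratio and the sub-action list are hypothetical; any sub-action handled
 * above (QUEUE, RSS, MARK, COUNT, PORT_ID, encap) may appear before END.
 *
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action sub_acts[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action_sample sample = {
 *             .ratio = 2,             // 1 of every 2 packets sampled
 *             .actions = sub_acts,
 *     };
 */
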
11961 /**
11962  * Convert Sample action to DV specification.
11963  *
11964  * @param[in] dev
11965  *   Pointer to rte_eth_dev structure.
11966  * @param[in, out] dev_flow
11967  *   Pointer to the mlx5_flow.
11968  * @param[in] num_of_dest
11969  *   The number of destinations.
11970  * @param[in, out] res
11971  *   Pointer to sample resource.
11972  * @param[in, out] mdest_res
11973  *   Pointer to destination array resource.
11974  * @param[in] sample_actions
11975  *   Pointer to sample path actions list.
11976  * @param[in] action_flags
11977  *   Holds the actions detected until now.
11978  * @param[out] error
11979  *   Pointer to the error structure.
11980  *
11981  * @return
11982  *   0 on success, a negative errno value otherwise and rte_errno is set.
11983  */
11984 static int
11985 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11986                              struct mlx5_flow *dev_flow,
11987                              uint32_t num_of_dest,
11988                              struct mlx5_flow_dv_sample_resource *res,
11989                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11990                              void **sample_actions,
11991                              uint64_t action_flags,
11992                              struct rte_flow_error *error)
11993 {
11994         /* update normal path action resource into last index of array */
11995         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11996         struct mlx5_flow_sub_actions_list *sample_act =
11997                                         &mdest_res->sample_act[dest_index];
11998         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11999         struct mlx5_flow_rss_desc *rss_desc;
12000         uint32_t normal_idx = 0;
12001         struct mlx5_hrxq *hrxq;
12002         uint32_t hrxq_idx;
12003
12004         MLX5_ASSERT(wks);
12005         rss_desc = &wks->rss_desc;
12006         if (num_of_dest > 1) {
12007                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
12008                         /* Handle QP action for mirroring */
12009                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
12010                                                     rss_desc, &hrxq_idx);
12011                         if (!hrxq)
12012                                 return rte_flow_error_set
12013                                      (error, rte_errno,
12014                                       RTE_FLOW_ERROR_TYPE_ACTION,
12015                                       NULL,
12016                                       "cannot create rx queue");
12017                         normal_idx++;
12018                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
12019                         sample_act->dr_queue_action = hrxq->action;
12020                         if (action_flags & MLX5_FLOW_ACTION_MARK)
12021                                 dev_flow->handle->rix_hrxq = hrxq_idx;
12022                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12023                 }
12024                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
12025                         normal_idx++;
12026                         mdest_res->sample_idx[dest_index].rix_encap_decap =
12027                                 dev_flow->handle->dvh.rix_encap_decap;
12028                         sample_act->dr_encap_action =
12029                                 dev_flow->dv.encap_decap->action;
12030                         dev_flow->handle->dvh.rix_encap_decap = 0;
12031                 }
12032                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
12033                         normal_idx++;
12034                         mdest_res->sample_idx[dest_index].rix_port_id_action =
12035                                 dev_flow->handle->rix_port_id_action;
12036                         sample_act->dr_port_id_action =
12037                                 dev_flow->dv.port_id_action->action;
12038                         dev_flow->handle->rix_port_id_action = 0;
12039                 }
12040                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
12041                         normal_idx++;
12042                         mdest_res->sample_idx[dest_index].rix_jump =
12043                                 dev_flow->handle->rix_jump;
12044                         sample_act->dr_jump_action =
12045                                 dev_flow->dv.jump->action;
12046                         dev_flow->handle->rix_jump = 0;
12047                 }
12048                 sample_act->actions_num = normal_idx;
12049                 /* update sample action resource into first index of array */
12050                 mdest_res->ft_type = res->ft_type;
12051                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
12052                                 sizeof(struct mlx5_flow_sub_actions_idx));
12053                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
12054                                 sizeof(struct mlx5_flow_sub_actions_list));
12055                 mdest_res->num_of_dest = num_of_dest;
12056                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
12057                                                          dev_flow, error))
12058                         return rte_flow_error_set(error, EINVAL,
12059                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12060                                                   NULL, "can't create sample "
12061                                                   "action");
12062         } else {
12063                 res->sub_actions = sample_actions;
12064                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
12065                         return rte_flow_error_set(error, EINVAL,
12066                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12067                                                   NULL,
12068                                                   "can't create sample action");
12069         }
12070         return 0;
12071 }
12072
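/*
 * Editor's note: destination-array layout assumed by the function above
 * when mirroring (num_of_dest > 1), sketched from the code:
 *
 *     mdest_res->sample_act[0]                     - sample path actions
 *     mdest_res->sample_act[MLX5_MAX_DEST_NUM - 1] - normal path actions
 */
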
12073 /**
12074  * Remove an ASO age action from age actions list.
12075  *
12076  * @param[in] dev
12077  *   Pointer to the Ethernet device structure.
12078  * @param[in] age
12079  *   Pointer to the aso age action handler.
12080  */
12081 static void
12082 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
12083                                 struct mlx5_aso_age_action *age)
12084 {
12085         struct mlx5_age_info *age_info;
12086         struct mlx5_age_param *age_param = &age->age_params;
12087         struct mlx5_priv *priv = dev->data->dev_private;
12088         uint16_t expected = AGE_CANDIDATE;
12089
12090         age_info = GET_PORT_AGE_INFO(priv);
12091         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
12092                                          AGE_FREE, false, __ATOMIC_RELAXED,
12093                                          __ATOMIC_RELAXED)) {
12094                 /*
12095                  * We need the lock even on age timeout,
12096                  * since the age action may still be in process.
12097                  */
12098                 rte_spinlock_lock(&age_info->aged_sl);
12099                 LIST_REMOVE(age, next);
12100                 rte_spinlock_unlock(&age_info->aged_sl);
12101                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
12102         }
12103 }
12104
12105 /**
12106  * Release an ASO age action.
12107  *
12108  * @param[in] dev
12109  *   Pointer to the Ethernet device structure.
12110  * @param[in] age_idx
12111  *   Index of ASO age action to release.
12115  *
12116  * @return
12117  *   0 when age action was removed, otherwise the number of references.
12118  */
12119 static int
12120 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
12121 {
12122         struct mlx5_priv *priv = dev->data->dev_private;
12123         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12124         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
12125         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
12126
12127         if (!ret) {
12128                 flow_dv_aso_age_remove_from_age(dev, age);
12129                 rte_spinlock_lock(&mng->free_sl);
12130                 LIST_INSERT_HEAD(&mng->free, age, next);
12131                 rte_spinlock_unlock(&mng->free_sl);
12132         }
12133         return ret;
12134 }
12135
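/*
 * Editor's sketch of the release contract documented above: a zero return
 * means the last reference was dropped and the action went back to the
 * free list, so the caller must stop using the index.
 *
 *     if (flow_dv_aso_age_release(dev, age_idx) == 0)
 *             age_idx = 0;
 */
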
12136 /**
12137  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
12138  *
12139  * @param[in] dev
12140  *   Pointer to the Ethernet device structure.
12141  *
12142  * @return
12143  *   0 on success, otherwise negative errno value and rte_errno is set.
12144  */
12145 static int
12146 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
12147 {
12148         struct mlx5_priv *priv = dev->data->dev_private;
12149         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12150         void *old_pools = mng->pools;
12151         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
12152         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
12153         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12154
12155         if (!pools) {
12156                 rte_errno = ENOMEM;
12157                 return -ENOMEM;
12158         }
12159         if (old_pools) {
12160                 memcpy(pools, old_pools,
12161                        mng->n * sizeof(struct mlx5_aso_age_pool *));
12162                 mlx5_free(old_pools);
12163         } else {
12164                 /* First ASO flow hit allocation - starting ASO data-path. */
12165                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
12166
12167                 if (ret) {
12168                         mlx5_free(pools);
12169                         return ret;
12170                 }
12171         }
12172         mng->n = resize;
12173         mng->pools = pools;
12174         return 0;
12175 }
12176
12177 /**
12178  * Create and initialize a new ASO aging pool.
12179  *
12180  * @param[in] dev
12181  *   Pointer to the Ethernet device structure.
12182  * @param[out] age_free
12183  *   Where to put the pointer of a new age action.
12184  *   Where to store the pointer to a new age action.
12185  * @return
12186  *   The age actions pool pointer and @p age_free is set on success,
12187  *   NULL otherwise and rte_errno is set.
12188  */
12189 static struct mlx5_aso_age_pool *
12190 flow_dv_age_pool_create(struct rte_eth_dev *dev,
12191                         struct mlx5_aso_age_action **age_free)
12192 {
12193         struct mlx5_priv *priv = dev->data->dev_private;
12194         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12195         struct mlx5_aso_age_pool *pool = NULL;
12196         struct mlx5_devx_obj *obj = NULL;
12197         uint32_t i;
12198
12199         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
12200                                                     priv->sh->cdev->pdn);
12201         if (!obj) {
12202                 rte_errno = ENODATA;
12203                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
12204                 return NULL;
12205         }
12206         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12207         if (!pool) {
12208                 claim_zero(mlx5_devx_cmd_destroy(obj));
12209                 rte_errno = ENOMEM;
12210                 return NULL;
12211         }
12212         pool->flow_hit_aso_obj = obj;
12213         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
12214         rte_rwlock_write_lock(&mng->resize_rwl);
12215         pool->index = mng->next;
12216         /* Resize pools array if there is no room for the new pool in it. */
12217         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
12218                 claim_zero(mlx5_devx_cmd_destroy(obj));
12219                 mlx5_free(pool);
12220                 rte_rwlock_write_unlock(&mng->resize_rwl);
12221                 return NULL;
12222         }
12223         mng->pools[pool->index] = pool;
12224         mng->next++;
12225         rte_rwlock_write_unlock(&mng->resize_rwl);
12226         /* Assign the first action in the new pool; the rest go to the free list. */
12227         *age_free = &pool->actions[0];
12228         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
12229                 pool->actions[i].offset = i;
12230                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
12231         }
12232         return pool;
12233 }
12234
12235 /**
12236  * Allocate an ASO aging bit.
12237  *
12238  * @param[in] dev
12239  *   Pointer to the Ethernet device structure.
12240  * @param[out] error
12241  *   Pointer to the error structure.
12242  *
12243  * @return
12244  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
12245  */
12246 static uint32_t
12247 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12248 {
12249         struct mlx5_priv *priv = dev->data->dev_private;
12250         const struct mlx5_aso_age_pool *pool;
12251         struct mlx5_aso_age_action *age_free = NULL;
12252         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12253
12254         MLX5_ASSERT(mng);
12255         /* Try to get the next free age action bit. */
12256         rte_spinlock_lock(&mng->free_sl);
12257         age_free = LIST_FIRST(&mng->free);
12258         if (age_free) {
12259                 LIST_REMOVE(age_free, next);
12260         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
12261                 rte_spinlock_unlock(&mng->free_sl);
12262                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12263                                    NULL, "failed to create ASO age pool");
12264                 return 0; /* 0 is an error. */
12265         }
12266         rte_spinlock_unlock(&mng->free_sl);
12267         pool = container_of
12268           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
12269                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
12270                                                                        actions);
12271         if (!age_free->dr_action) {
12272                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
12273                                                  error);
12274
12275                 if (reg_c < 0) {
12276                         rte_flow_error_set(error, rte_errno,
12277                                            RTE_FLOW_ERROR_TYPE_ACTION,
12278                                            NULL, "failed to get reg_c "
12279                                            "for ASO flow hit");
12280                         return 0; /* 0 is an error. */
12281                 }
12282 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
12283                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
12284                                 (priv->sh->rx_domain,
12285                                  pool->flow_hit_aso_obj->obj, age_free->offset,
12286                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
12287                                  (reg_c - REG_C_0));
12288 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
12289                 if (!age_free->dr_action) {
12290                         rte_errno = errno;
12291                         rte_spinlock_lock(&mng->free_sl);
12292                         LIST_INSERT_HEAD(&mng->free, age_free, next);
12293                         rte_spinlock_unlock(&mng->free_sl);
12294                         rte_flow_error_set(error, rte_errno,
12295                                            RTE_FLOW_ERROR_TYPE_ACTION,
12296                                            NULL, "failed to create ASO "
12297                                            "flow hit action");
12298                         return 0; /* 0 is an error. */
12299                 }
12300         }
12301         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
12302         return pool->index | ((age_free->offset + 1) << 16);
12303 }
12304
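/*
 * Editor's note: layout of the age index returned above, derived from the
 * return statement: the low 16 bits carry the pool index and the high 16
 * bits carry (offset + 1), which keeps 0 free as the error value.
 *
 *     pool_idx = age_idx & 0xffff;
 *     offset   = (age_idx >> 16) - 1;
 */
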
12305 /**
12306  * Initialize flow ASO age parameters.
12307  *
12308  * @param[in] dev
12309  *   Pointer to rte_eth_dev structure.
12310  * @param[in] age_idx
12311  *   Index of ASO age action.
12312  * @param[in] context
12313  *   Pointer to flow counter age context.
12314  * @param[in] timeout
12315  *   Aging timeout in seconds.
12316  *
12317  */
12318 static void
12319 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12320                             uint32_t age_idx,
12321                             void *context,
12322                             uint32_t timeout)
12323 {
12324         struct mlx5_aso_age_action *aso_age;
12325
12326         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12327         MLX5_ASSERT(aso_age);
12328         aso_age->age_params.context = context;
12329         aso_age->age_params.timeout = timeout;
12330         aso_age->age_params.port_id = dev->data->port_id;
12331         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12332                          __ATOMIC_RELAXED);
12333         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12334                          __ATOMIC_RELAXED);
12335 }
12336
12337 static void
12338 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12339                                const struct rte_flow_item_integrity *value,
12340                                void *headers_m, void *headers_v)
12341 {
12342         if (mask->l4_ok) {
12343                 /* RTE l4_ok filter aggregates hardware l4_ok and
12344                  * l4_checksum_ok filters.
12345                  * Positive RTE l4_ok match requires hardware match on both L4
12346                  * hardware integrity bits.
12347                  * For negative match, check hardware l4_checksum_ok bit only,
12348                  * because hardware sets that bit to 0 for all packets
12349                  * with bad L4.
12350                  */
12351                 if (value->l4_ok) {
12352                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
12353                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
12354                 }
12355                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12356                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12357                          !!value->l4_ok);
12358         }
12359         if (mask->l4_csum_ok) {
12360                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12361                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12362                          value->l4_csum_ok);
12363         }
12364 }
12365
12366 static void
12367 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12368                                const struct rte_flow_item_integrity *value,
12369                                void *headers_m, void *headers_v, bool is_ipv4)
12370 {
12371         if (mask->l3_ok) {
12372                 /* RTE l3_ok filter aggregates for IPv4 hardware l3_ok and
12373                  * ipv4_csum_ok filters.
12374                  * Positive RTE l3_ok match requires hardware match on both L3
12375                  * hardware integrity bits.
12376                  * For negative match, check hardware l3_csum_ok bit only,
12377                  * because hardware sets that bit to 0 for all packets
12378                  * with bad L3.
12379                  */
12380                 if (is_ipv4) {
12381                         if (value->l3_ok) {
12382                                 MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12383                                          l3_ok, 1);
12384                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12385                                          l3_ok, 1);
12386                         }
12387                         MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12388                                  ipv4_checksum_ok, 1);
12389                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12390                                  ipv4_checksum_ok, !!value->l3_ok);
12391                 } else {
12392                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
12393                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12394                                  value->l3_ok);
12395                 }
12396         }
12397         if (mask->ipv4_csum_ok) {
12398                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
12399                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12400                          value->ipv4_csum_ok);
12401         }
12402 }
12403
12404 static void
12405 set_integrity_bits(void *headers_m, void *headers_v,
12406                    const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12407 {
12408         const struct rte_flow_item_integrity *spec = integrity_item->spec;
12409         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12410
12411         /* Integrity item validation guarantees a non-NULL spec pointer. */
12412         MLX5_ASSERT(spec != NULL);
12413         if (!mask)
12414                 mask = &rte_flow_item_integrity_mask;
12415         flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12416                                        is_l3_ip4);
12417         flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12418 }
12419
12420 static void
12421 flow_dv_translate_item_integrity_post(void *matcher, void *key,
12422                                       const
12423                                       struct rte_flow_item *integrity_items[2],
12424                                       uint64_t pattern_flags)
12425 {
12426         void *headers_m, *headers_v;
12427         bool is_l3_ip4;
12428
12429         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12430                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12431                                          inner_headers);
12432                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12433                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12434                             0;
12435                 set_integrity_bits(headers_m, headers_v,
12436                                    integrity_items[1], is_l3_ip4);
12437         }
12438         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12439                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12440                                          outer_headers);
12441                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12442                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12443                             0;
12444                 set_integrity_bits(headers_m, headers_v,
12445                                    integrity_items[0], is_l3_ip4);
12446         }
12447 }
12448
12449 static void
12450 flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12451                                  const struct rte_flow_item *integrity_items[2],
12452                                  uint64_t *last_item)
12453 {
12454         const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12455
12456         /* Integrity item validation guarantees a non-NULL spec pointer. */
12457         MLX5_ASSERT(spec != NULL);
12458         if (spec->level > 1) {
12459                 integrity_items[1] = item;
12460                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12461         } else {
12462                 integrity_items[0] = item;
12463                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12464         }
12465 }
12466
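/*
 * Editor's sketch (hypothetical application-side item): an integrity
 * pattern item that exercises the translations above - matching packets
 * whose outer L3 and L4 headers and checksums are all valid (level 0
 * selects the outer headers):
 *
 *     struct rte_flow_item_integrity spec = { .l3_ok = 1, .l4_ok = 1 };
 *     struct rte_flow_item_integrity mask = { .l3_ok = 1, .l4_ok = 1 };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 */
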
12467 /**
12468  * Prepares DV flow counter with aging configuration.
12469  * Gets the counter by index when it exists, creates a new one otherwise.
12470  *
12471  * @param[in] dev
12472  *   Pointer to rte_eth_dev structure.
12473  * @param[in] dev_flow
12474  *   Pointer to the mlx5_flow.
12475  * @param[in, out] flow
12476  *   Pointer to the sub flow.
12477  * @param[in] count
12478  *   Pointer to the counter action configuration.
12479  * @param[in] age
12480  *   Pointer to the aging action configuration.
12481  * @param[out] error
12482  *   Pointer to the error structure.
12483  *
12484  * @return
12485  *   Pointer to the counter, NULL otherwise.
12486  */
12487 static struct mlx5_flow_counter *
12488 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12489                         struct mlx5_flow *dev_flow,
12490                         struct rte_flow *flow,
12491                         const struct rte_flow_action_count *count,
12492                         const struct rte_flow_action_age *age,
12493                         struct rte_flow_error *error)
12494 {
12495         if (!flow->counter) {
12496                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12497                                                                  count, age);
12498                 if (!flow->counter) {
12499                         rte_flow_error_set(error, rte_errno,
12500                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12501                                            "cannot create counter object.");
12502                         return NULL;
12503                 }
12504         }
12505         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12506 }
12507
12508 /*
12509  * Release an ASO CT action on its owner device.
12510  *
12511  * @param[in] dev
12512  *   Pointer to the Ethernet device structure.
12513  * @param[in] idx
12514  *   Index of ASO CT action to release.
12515  *
12516  * @return
12517  *   0 when CT action was removed, otherwise the number of references.
12518  */
12519 static inline int
12520 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12521 {
12522         struct mlx5_priv *priv = dev->data->dev_private;
12523         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12524         uint32_t ret;
12525         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12526         enum mlx5_aso_ct_state state =
12527                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12528
12529         /* Cannot release when CT is in the ASO SQ. */
12530         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12531                 return -1;
12532         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12533         if (!ret) {
12534                 if (ct->dr_action_orig) {
12535 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12536                         claim_zero(mlx5_glue->destroy_flow_action
12537                                         (ct->dr_action_orig));
12538 #endif
12539                         ct->dr_action_orig = NULL;
12540                 }
12541                 if (ct->dr_action_rply) {
12542 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12543                         claim_zero(mlx5_glue->destroy_flow_action
12544                                         (ct->dr_action_rply));
12545 #endif
12546                         ct->dr_action_rply = NULL;
12547                 }
12548                 /* Clear the state to free; not needed on the first allocation. */
12549                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12550                 rte_spinlock_lock(&mng->ct_sl);
12551                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12552                 rte_spinlock_unlock(&mng->ct_sl);
12553         }
12554         return (int)ret;
12555 }
12556
12557 static inline int
12558 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12559                        struct rte_flow_error *error)
12560 {
12561         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12562         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12563         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12564         int ret;
12565
12566         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12567         if (dev->data->dev_started != 1)
12568                 return rte_flow_error_set(error, EAGAIN,
12569                                           RTE_FLOW_ERROR_TYPE_ACTION,
12570                                           NULL,
12571                                           "Indirect CT action cannot be destroyed when the port is stopped");
12572         ret = flow_dv_aso_ct_dev_release(owndev, idx);
12573         if (ret < 0)
12574                 return rte_flow_error_set(error, EAGAIN,
12575                                           RTE_FLOW_ERROR_TYPE_ACTION,
12576                                           NULL,
12577                                           "Current state prevents indirect CT action from being destroyed");
12578         return ret;
12579 }
12580
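/*
 * Editor's note: the 32-bit indirect CT handle packs the owner port and a
 * per-device action index; the exact bit split is whatever the
 * MLX5_INDIRECT_ACT_CT_GET_OWNER()/MLX5_INDIRECT_ACT_CT_GET_IDX() macros
 * used above define, sketched as:
 *
 *     owner = MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
 *     idx   = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
 */
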
12581 /*
12582  * Resize the ASO CT pools array by 64 pools.
12583  *
12584  * @param[in] dev
12585  *   Pointer to the Ethernet device structure.
12586  *
12587  * @return
12588  *   0 on success, otherwise negative errno value and rte_errno is set.
12589  */
12590 static int
12591 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12592 {
12593         struct mlx5_priv *priv = dev->data->dev_private;
12594         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12595         void *old_pools = mng->pools;
12596         /* Magic number for now; should become a macro. */
12597         uint32_t resize = mng->n + 64;
12598         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12599         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12600
12601         if (!pools) {
12602                 rte_errno = ENOMEM;
12603                 return -rte_errno;
12604         }
12605         rte_rwlock_write_lock(&mng->resize_rwl);
12606         /* The ASO SQ/QP was already initialized at startup. */
12607         if (old_pools) {
12608                 /* Realloc could be an alternative choice. */
12609                 rte_memcpy(pools, old_pools,
12610                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12611                 mlx5_free(old_pools);
12612         }
12613         mng->n = resize;
12614         mng->pools = pools;
12615         rte_rwlock_write_unlock(&mng->resize_rwl);
12616         return 0;
12617 }
12618
12619 /*
12620  * Create and initialize a new ASO CT pool.
12621  *
12622  * @param[in] dev
12623  *   Pointer to the Ethernet device structure.
12624  * @param[out] ct_free
12625  *   Where to store the pointer to a new CT action.
12626  *
12627  * @return
12628  *   The CT actions pool pointer and @p ct_free is set on success,
12629  *   NULL otherwise and rte_errno is set.
12630  */
12631 static struct mlx5_aso_ct_pool *
12632 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12633                        struct mlx5_aso_ct_action **ct_free)
12634 {
12635         struct mlx5_priv *priv = dev->data->dev_private;
12636         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12637         struct mlx5_aso_ct_pool *pool = NULL;
12638         struct mlx5_devx_obj *obj = NULL;
12639         uint32_t i;
12640         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12641
12642         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
12643                                                           priv->sh->cdev->pdn,
12644                                                           log_obj_size);
12645         if (!obj) {
12646                 rte_errno = ENODATA;
12647                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12648                 return NULL;
12649         }
12650         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12651         if (!pool) {
12652                 rte_errno = ENOMEM;
12653                 claim_zero(mlx5_devx_cmd_destroy(obj));
12654                 return NULL;
12655         }
12656         pool->devx_obj = obj;
12657         pool->index = mng->next;
12658         /* Resize pools array if there is no room for the new pool in it. */
12659         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12660                 claim_zero(mlx5_devx_cmd_destroy(obj));
12661                 mlx5_free(pool);
12662                 return NULL;
12663         }
12664         mng->pools[pool->index] = pool;
12665         mng->next++;
12666         /* Assign the first action in the new pool; the rest go to the free list. */
12667         *ct_free = &pool->actions[0];
12668         /* Locked by the caller; the list operation is safe here. */
12669         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12670                 /* refcnt is 0 when allocating the memory. */
12671                 pool->actions[i].offset = i;
12672                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12673         }
12674         return pool;
12675 }
12676
12677 /*
12678  * Allocate an ASO CT action from the free list.
12679  *
12680  * @param[in] dev
12681  *   Pointer to the Ethernet device structure.
12682  * @param[out] error
12683  *   Pointer to the error structure.
12684  *
12685  * @return
12686  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12687  */
12688 static uint32_t
12689 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12690 {
12691         struct mlx5_priv *priv = dev->data->dev_private;
12692         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12693         struct mlx5_aso_ct_action *ct = NULL;
12694         struct mlx5_aso_ct_pool *pool;
12695         uint8_t reg_c;
12696         uint32_t ct_idx;
12697
12698         MLX5_ASSERT(mng);
12699         if (!priv->sh->cdev->config.devx) {
12700                 rte_errno = ENOTSUP;
12701                 return 0;
12702         }
12703         /* Get a free CT action; if none, a new pool will be created. */
12704         rte_spinlock_lock(&mng->ct_sl);
12705         ct = LIST_FIRST(&mng->free_cts);
12706         if (ct) {
12707                 LIST_REMOVE(ct, next);
12708         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12709                 rte_spinlock_unlock(&mng->ct_sl);
12710                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12711                                    NULL, "failed to create ASO CT pool");
12712                 return 0;
12713         }
12714         rte_spinlock_unlock(&mng->ct_sl);
12715         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12716         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12717         /* 0: inactive, 1: created, 2+: used by flows. */
12718         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12719         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
12720         if (!ct->dr_action_orig) {
12721 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12722                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12723                         (priv->sh->rx_domain, pool->devx_obj->obj,
12724                          ct->offset,
12725                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12726                          reg_c - REG_C_0);
12727 #else
12728                 RTE_SET_USED(reg_c);
12729 #endif
12730                 if (!ct->dr_action_orig) {
12731                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12732                         rte_flow_error_set(error, rte_errno,
12733                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12734                                            "failed to create ASO CT action");
12735                         return 0;
12736                 }
12737         }
12738         if (!ct->dr_action_rply) {
12739 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12740                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12741                         (priv->sh->rx_domain, pool->devx_obj->obj,
12742                          ct->offset,
12743                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12744                          reg_c - REG_C_0);
12745 #endif
12746                 if (!ct->dr_action_rply) {
12747                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12748                         rte_flow_error_set(error, rte_errno,
12749                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12750                                            "failed to create ASO CT action");
12751                         return 0;
12752                 }
12753         }
12754         return ct_idx;
12755 }
12756
12757 /*
12758  * Create a conntrack object with context and actions by using ASO mechanism.
12759  *
12760  * @param[in] dev
12761  *   Pointer to rte_eth_dev structure.
12762  * @param[in] pro
12763  *   Pointer to conntrack information profile.
12764  * @param[out] error
12765  *   Pointer to the error structure.
12766  *
12767  * @return
12768  *   Index to conntrack object on success, 0 otherwise.
12769  */
12770 static uint32_t
12771 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12772                                    const struct rte_flow_action_conntrack *pro,
12773                                    struct rte_flow_error *error)
12774 {
12775         struct mlx5_priv *priv = dev->data->dev_private;
12776         struct mlx5_dev_ctx_shared *sh = priv->sh;
12777         struct mlx5_aso_ct_action *ct;
12778         uint32_t idx;
12779
12780         if (!sh->ct_aso_en)
12781                 return rte_flow_error_set(error, ENOTSUP,
12782                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12783                                           "Connection tracking is not supported");
12784         idx = flow_dv_aso_ct_alloc(dev, error);
12785         if (!idx)
12786                 return rte_flow_error_set(error, rte_errno,
12787                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12788                                           "Failed to allocate CT object");
12789         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12790         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12791                 return rte_flow_error_set(error, EBUSY,
12792                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12793                                           "Failed to update CT");
12794         ct->is_original = !!pro->is_original_dir;
12795         ct->peer = pro->peer_port;
12796         return idx;
12797 }
12798
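/*
 * Editor's sketch (hypothetical profile): the conntrack configuration an
 * application would hand to this translation through the CONNTRACK
 * indirect action; only a few representative fields are shown:
 *
 *     struct rte_flow_action_conntrack pro = {
 *             .peer_port = 1,
 *             .is_original_dir = 1,
 *             .state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
 *     };
 */
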
12799 /**
12800  * Fill the flow with the DV spec, lock free
12801  * (the mutex should be acquired by the caller).
12802  *
12803  * @param[in] dev
12804  *   Pointer to rte_eth_dev structure.
12805  * @param[in, out] dev_flow
12806  *   Pointer to the sub flow.
12807  * @param[in] attr
12808  *   Pointer to the flow attributes.
12809  * @param[in] items
12810  *   Pointer to the list of items.
12811  * @param[in] actions
12812  *   Pointer to the list of actions.
12813  * @param[out] error
12814  *   Pointer to the error structure.
12815  *
12816  * @return
12817  *   0 on success, a negative errno value otherwise and rte_errno is set.
12818  */
12819 static int
12820 flow_dv_translate(struct rte_eth_dev *dev,
12821                   struct mlx5_flow *dev_flow,
12822                   const struct rte_flow_attr *attr,
12823                   const struct rte_flow_item items[],
12824                   const struct rte_flow_action actions[],
12825                   struct rte_flow_error *error)
12826 {
12827         struct mlx5_priv *priv = dev->data->dev_private;
12828         struct mlx5_sh_config *dev_conf = &priv->sh->config;
12829         struct rte_flow *flow = dev_flow->flow;
12830         struct mlx5_flow_handle *handle = dev_flow->handle;
12831         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12832         struct mlx5_flow_rss_desc *rss_desc;
12833         uint64_t item_flags = 0;
12834         uint64_t last_item = 0;
12835         uint64_t action_flags = 0;
12836         struct mlx5_flow_dv_matcher matcher = {
12837                 .mask = {
12838                         .size = sizeof(matcher.mask.buf),
12839                 },
12840         };
12841         int actions_n = 0;
12842         bool actions_end = false;
12843         union {
12844                 struct mlx5_flow_dv_modify_hdr_resource res;
12845                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12846                             sizeof(struct mlx5_modification_cmd) *
12847                             (MLX5_MAX_MODIFY_NUM + 1)];
12848         } mhdr_dummy;
12849         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12850         const struct rte_flow_action_count *count = NULL;
12851         const struct rte_flow_action_age *non_shared_age = NULL;
12852         union flow_dv_attr flow_attr = { .attr = 0 };
12853         uint32_t tag_be;
12854         union mlx5_flow_tbl_key tbl_key;
12855         uint32_t modify_action_position = UINT32_MAX;
12856         void *match_mask = matcher.mask.buf;
12857         void *match_value = dev_flow->dv.value.buf;
12858         uint8_t next_protocol = 0xff;
12859         struct rte_vlan_hdr vlan = { 0 };
12860         struct mlx5_flow_dv_dest_array_resource mdest_res;
12861         struct mlx5_flow_dv_sample_resource sample_res;
12862         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12863         const struct rte_flow_action_sample *sample = NULL;
12864         struct mlx5_flow_sub_actions_list *sample_act;
12865         uint32_t sample_act_pos = UINT32_MAX;
12866         uint32_t age_act_pos = UINT32_MAX;
12867         uint32_t num_of_dest = 0;
12868         int tmp_actions_n = 0;
12869         uint32_t table;
12870         int ret = 0;
12871         const struct mlx5_flow_tunnel *tunnel = NULL;
12872         struct flow_grp_info grp_info = {
12873                 .external = !!dev_flow->external,
12874                 .transfer = !!attr->transfer,
12875                 .fdb_def_rule = !!priv->fdb_def_rule,
12876                 .skip_scale = dev_flow->skip_scale &
12877                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12878                 .std_tbl_fix = true,
12879         };
12880         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
12881         const struct rte_flow_item *tunnel_item = NULL;
12882         const struct rte_flow_item *gre_item = NULL;
12883
12884         if (!wks)
12885                 return rte_flow_error_set(error, ENOMEM,
12886                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12887                                           NULL,
12888                                           "failed to push flow workspace");
12889         rss_desc = &wks->rss_desc;
12890         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12891         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12892         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12893                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12894         /* update normal path action resource into last index of array */
12895         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12896         if (is_tunnel_offload_active(dev)) {
12897                 if (dev_flow->tunnel) {
12898                         RTE_VERIFY(dev_flow->tof_type ==
12899                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12900                         tunnel = dev_flow->tunnel;
12901                 } else {
12902                         tunnel = mlx5_get_tof(items, actions,
12903                                               &dev_flow->tof_type);
12904                         dev_flow->tunnel = tunnel;
12905                 }
12906                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12907                                         (dev, attr, tunnel, dev_flow->tof_type);
12908         }
12911         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12912                                        &grp_info, error);
12913         if (ret)
12914                 return ret;
12915         dev_flow->dv.group = table;
12916         if (attr->transfer)
12917                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12918         /* The number of actions must be set to 0 in case of a dirty stack. */
12919         mhdr_res->actions_num = 0;
12920         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12921                 /*
12922                  * Do not add a decap action if the match rule drops the
12923                  * packet; HW rejects rules with both decap and drop.
12924                  *
12925                  * If a tunnel match rule was inserted before the matching
12926                  * tunnel set rule, the flow table used in the match rule
12927                  * must be registered. The current implementation handles
12928                  * that in flow_dv_match_register() at the function end.
12929                  */
12930                 bool add_decap = true;
12931                 const struct rte_flow_action *ptr = actions;
12932
12933                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12934                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12935                                 add_decap = false;
12936                                 break;
12937                         }
12938                 }
12939                 if (add_decap) {
12940                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12941                                                            attr->transfer,
12942                                                            error))
12943                                 return -rte_errno;
12944                         dev_flow->dv.actions[actions_n++] =
12945                                         dev_flow->dv.encap_decap->action;
12946                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12947                 }
12948         }
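              /*
               * Translate the action list. Each action either appends a ready
               * DV action to dev_flow->dv.actions[] or queues a modify-header
               * command in mhdr_res; the queued commands are registered as a
               * single DV action when RTE_FLOW_ACTION_TYPE_END is reached.
               * Fate actions such as QUEUE and RSS only record the fate on
               * the flow handle here and are resolved when the rule is
               * applied.
               */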
12949         for (; !actions_end ; actions++) {
12950                 const struct rte_flow_action_queue *queue;
12951                 const struct rte_flow_action_rss *rss;
12952                 const struct rte_flow_action *action = actions;
12953                 const uint8_t *rss_key;
12954                 struct mlx5_flow_tbl_resource *tbl;
12955                 struct mlx5_aso_age_action *age_act;
12956                 struct mlx5_flow_counter *cnt_act;
12957                 uint32_t port_id = 0;
12958                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12959                 int action_type = actions->type;
12960                 const struct rte_flow_action *found_action = NULL;
12961                 uint32_t jump_group = 0;
12962                 uint32_t owner_idx;
12963                 struct mlx5_aso_ct_action *ct;
12964
12965                 if (!mlx5_flow_os_action_supported(action_type))
12966                         return rte_flow_error_set(error, ENOTSUP,
12967                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12968                                                   actions,
12969                                                   "action not supported");
12970                 switch (action_type) {
12971                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12972                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12973                         break;
12974                 case RTE_FLOW_ACTION_TYPE_VOID:
12975                         break;
12976                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12977                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12978                         if (flow_dv_translate_action_port_id(dev, action,
12979                                                              &port_id, error))
12980                                 return -rte_errno;
12981                         port_id_resource.port_id = port_id;
12982                         MLX5_ASSERT(!handle->rix_port_id_action);
12983                         if (flow_dv_port_id_action_resource_register
12984                             (dev, &port_id_resource, dev_flow, error))
12985                                 return -rte_errno;
12986                         dev_flow->dv.actions[actions_n++] =
12987                                         dev_flow->dv.port_id_action->action;
12988                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12989                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12990                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12991                         num_of_dest++;
12992                         break;
12993                 case RTE_FLOW_ACTION_TYPE_FLAG:
12994                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12995                         wks->mark = 1;
12996                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12997                                 struct rte_flow_action_mark mark = {
12998                                         .id = MLX5_FLOW_MARK_DEFAULT,
12999                                 };
13000
13001                                 if (flow_dv_convert_action_mark(dev, &mark,
13002                                                                 mhdr_res,
13003                                                                 error))
13004                                         return -rte_errno;
13005                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
13006                                 break;
13007                         }
13008                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
13009                         /*
13010                          * Only one FLAG or MARK is supported per device flow
13011                          * right now, so the pointer to the tag resource must
13012                          * be zero before registration.
13013                          */
13014                         MLX5_ASSERT(!handle->dvh.rix_tag);
13015                         if (flow_dv_tag_resource_register(dev, tag_be,
13016                                                           dev_flow, error))
13017                                 return -rte_errno;
13018                         MLX5_ASSERT(dev_flow->dv.tag_resource);
13019                         dev_flow->dv.actions[actions_n++] =
13020                                         dev_flow->dv.tag_resource->action;
13021                         break;
13022                 case RTE_FLOW_ACTION_TYPE_MARK:
13023                         action_flags |= MLX5_FLOW_ACTION_MARK;
13024                         wks->mark = 1;
13025                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
13026                                 const struct rte_flow_action_mark *mark =
13027                                         (const struct rte_flow_action_mark *)
13028                                                 actions->conf;
13029
13030                                 if (flow_dv_convert_action_mark(dev, mark,
13031                                                                 mhdr_res,
13032                                                                 error))
13033                                         return -rte_errno;
13034                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
13035                                 break;
13036                         }
13037                         /* Fall-through */
13038                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
13039                         /* Legacy (non-extended) MARK action. */
13040                         tag_be = mlx5_flow_mark_set
13041                               (((const struct rte_flow_action_mark *)
13042                                (actions->conf))->id);
13043                         MLX5_ASSERT(!handle->dvh.rix_tag);
13044                         if (flow_dv_tag_resource_register(dev, tag_be,
13045                                                           dev_flow, error))
13046                                 return -rte_errno;
13047                         MLX5_ASSERT(dev_flow->dv.tag_resource);
13048                         dev_flow->dv.actions[actions_n++] =
13049                                         dev_flow->dv.tag_resource->action;
13050                         break;
13051                 case RTE_FLOW_ACTION_TYPE_SET_META:
13052                         if (flow_dv_convert_action_set_meta
13053                                 (dev, mhdr_res, attr,
13054                                  (const struct rte_flow_action_set_meta *)
13055                                   actions->conf, error))
13056                                 return -rte_errno;
13057                         action_flags |= MLX5_FLOW_ACTION_SET_META;
13058                         break;
13059                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
13060                         if (flow_dv_convert_action_set_tag
13061                                 (dev, mhdr_res,
13062                                  (const struct rte_flow_action_set_tag *)
13063                                   actions->conf, error))
13064                                 return -rte_errno;
13065                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13066                         break;
13067                 case RTE_FLOW_ACTION_TYPE_DROP:
13068                         action_flags |= MLX5_FLOW_ACTION_DROP;
13069                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
13070                         break;
13071                 case RTE_FLOW_ACTION_TYPE_QUEUE:
13072                         queue = actions->conf;
13073                         rss_desc->queue_num = 1;
13074                         rss_desc->queue[0] = queue->index;
13075                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
13076                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
13077                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
13078                         num_of_dest++;
13079                         break;
13080                 case RTE_FLOW_ACTION_TYPE_RSS:
13081                         rss = actions->conf;
13082                         memcpy(rss_desc->queue, rss->queue,
13083                                rss->queue_num * sizeof(uint16_t));
13084                         rss_desc->queue_num = rss->queue_num;
13085                         /* NULL RSS key indicates default RSS key. */
13086                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
13087                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
13088                         /*
13089                          * rss->level and rss->types should be set in advance
13090                          * when expanding items for RSS.
13091                          */
13092                         action_flags |= MLX5_FLOW_ACTION_RSS;
13093                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
13094                                 MLX5_FLOW_FATE_SHARED_RSS :
13095                                 MLX5_FLOW_FATE_QUEUE;
13096                         break;
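                      /*
                       * Indirect (shared) AGE action: action->conf carries the
                       * ASO age index and the flow takes a reference on first
                       * use.
                       */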
13097                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
13098                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13099                         age_act = flow_aso_age_get_by_idx(dev, owner_idx);
13100                         if (flow->age == 0) {
13101                                 flow->age = owner_idx;
13102                                 __atomic_fetch_add(&age_act->refcnt, 1,
13103                                                    __ATOMIC_RELAXED);
13104                         }
13105                         age_act_pos = actions_n++;
13106                         action_flags |= MLX5_FLOW_ACTION_AGE;
13107                         break;
13108                 case RTE_FLOW_ACTION_TYPE_AGE:
13109                         non_shared_age = action->conf;
13110                         age_act_pos = actions_n++;
13111                         action_flags |= MLX5_FLOW_ACTION_AGE;
13112                         break;
13113                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
13114                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13115                         cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
13116                                                              NULL);
13117                         MLX5_ASSERT(cnt_act != NULL);
13118                         /*
13119                          * When creating a meter drop flow in the drop table,
13120                          * the counter should not overwrite the rte flow counter.
13121                          */
13122                         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13123                             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
13124                                 dev_flow->dv.actions[actions_n++] =
13125                                                         cnt_act->action;
13126                         } else {
13127                                 if (flow->counter == 0) {
13128                                         flow->counter = owner_idx;
13129                                         __atomic_fetch_add
13130                                                 (&cnt_act->shared_info.refcnt,
13131                                                  1, __ATOMIC_RELAXED);
13132                                 }
13133                                 /* Save information first, will apply later. */
13134                                 action_flags |= MLX5_FLOW_ACTION_COUNT;
13135                         }
13136                         break;
13137                 case RTE_FLOW_ACTION_TYPE_COUNT:
13138                         if (!priv->sh->cdev->config.devx) {
13139                                 return rte_flow_error_set
13140                                               (error, ENOTSUP,
13141                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13142                                                NULL,
13143                                                "count action not supported");
13144                         }
13145                         /* Save information first, will apply later. */
13146                         count = action->conf;
13147                         action_flags |= MLX5_FLOW_ACTION_COUNT;
13148                         break;
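                      /* VLAN pop reuses the single action cached in the shared context. */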
13149                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
13150                         dev_flow->dv.actions[actions_n++] =
13151                                                 priv->sh->pop_vlan_action;
13152                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
13153                         break;
13154                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
13155                         if (!(action_flags &
13156                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
13157                                 flow_dev_get_vlan_info_from_items(items, &vlan);
13158                         vlan.eth_proto = rte_be_to_cpu_16
13159                              ((((const struct rte_flow_action_of_push_vlan *)
13160                                                    actions->conf)->ethertype));
13161                         found_action = mlx5_flow_find_action
13162                                         (actions + 1,
13163                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
13164                         if (found_action)
13165                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
13166                         found_action = mlx5_flow_find_action
13167                                         (actions + 1,
13168                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
13169                         if (found_action)
13170                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
13171                         if (flow_dv_create_action_push_vlan
13172                                             (dev, attr, &vlan, dev_flow, error))
13173                                 return -rte_errno;
13174                         dev_flow->dv.actions[actions_n++] =
13175                                         dev_flow->dv.push_vlan_res->action;
13176                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
13177                         break;
13178                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
13179                         /* The OF_PUSH_VLAN action already handled this action. */
13180                         MLX5_ASSERT(action_flags &
13181                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
13182                         break;
13183                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
13184                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
13185                                 break;
13186                         flow_dev_get_vlan_info_from_items(items, &vlan);
13187                         mlx5_update_vlan_vid_pcp(actions, &vlan);
13188                         /* If there is no VLAN push, this is a modify header action. */
13189                         if (flow_dv_convert_action_modify_vlan_vid
13190                                                 (mhdr_res, actions, error))
13191                                 return -rte_errno;
13192                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
13193                         break;
13194                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13195                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13196                         if (flow_dv_create_action_l2_encap(dev, actions,
13197                                                            dev_flow,
13198                                                            attr->transfer,
13199                                                            error))
13200                                 return -rte_errno;
13201                         dev_flow->dv.actions[actions_n++] =
13202                                         dev_flow->dv.encap_decap->action;
13203                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13204                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13205                                 sample_act->action_flags |=
13206                                                         MLX5_FLOW_ACTION_ENCAP;
13207                         break;
13208                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
13209                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
13210                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
13211                                                            attr->transfer,
13212                                                            error))
13213                                 return -rte_errno;
13214                         dev_flow->dv.actions[actions_n++] =
13215                                         dev_flow->dv.encap_decap->action;
13216                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13217                         break;
13218                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13219                         /* Handle encap with preceding decap. */
13220                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
13221                                 if (flow_dv_create_action_raw_encap
13222                                         (dev, actions, dev_flow, attr, error))
13223                                         return -rte_errno;
13224                                 dev_flow->dv.actions[actions_n++] =
13225                                         dev_flow->dv.encap_decap->action;
13226                         } else {
13227                                 /* Handle encap without preceding decap. */
13228                                 if (flow_dv_create_action_l2_encap
13229                                     (dev, actions, dev_flow, attr->transfer,
13230                                      error))
13231                                         return -rte_errno;
13232                                 dev_flow->dv.actions[actions_n++] =
13233                                         dev_flow->dv.encap_decap->action;
13234                         }
13235                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13236                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13237                                 sample_act->action_flags |=
13238                                                         MLX5_FLOW_ACTION_ENCAP;
13239                         break;
13240                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
13241                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
13242                                 ;
13243                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
13244                                 if (flow_dv_create_action_l2_decap
13245                                     (dev, dev_flow, attr->transfer, error))
13246                                         return -rte_errno;
13247                                 dev_flow->dv.actions[actions_n++] =
13248                                         dev_flow->dv.encap_decap->action;
13249                         }
13250                         /* If decap is followed by encap, handle it at encap. */
13251                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13252                         break;
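                      /*
                       * Internal jump: action->conf already holds a prepared DV
                       * jump action, so it is appended directly.
                       */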
13253                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
13254                         dev_flow->dv.actions[actions_n++] =
13255                                 (void *)(uintptr_t)action->conf;
13256                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13257                         break;
13258                 case RTE_FLOW_ACTION_TYPE_JUMP:
13259                         jump_group = ((const struct rte_flow_action_jump *)
13260                                                         action->conf)->group;
13261                         grp_info.std_tbl_fix = 0;
13262                         if (dev_flow->skip_scale &
13263                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
13264                                 grp_info.skip_scale = 1;
13265                         else
13266                                 grp_info.skip_scale = 0;
13267                         ret = mlx5_flow_group_to_table(dev, tunnel,
13268                                                        jump_group,
13269                                                        &table,
13270                                                        &grp_info, error);
13271                         if (ret)
13272                                 return ret;
13273                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
13274                                                        attr->transfer,
13275                                                        !!dev_flow->external,
13276                                                        tunnel, jump_group, 0,
13277                                                        0, error);
13278                         if (!tbl)
13279                                 return rte_flow_error_set
13280                                                 (error, errno,
13281                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13282                                                  NULL,
13283                                                  "cannot create jump action.");
13284                         if (flow_dv_jump_tbl_resource_register
13285                             (dev, tbl, dev_flow, error)) {
13286                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13287                                 return rte_flow_error_set
13288                                                 (error, errno,
13289                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13290                                                  NULL,
13291                                                  "cannot create jump action.");
13292                         }
13293                         dev_flow->dv.actions[actions_n++] =
13294                                         dev_flow->dv.jump->action;
13295                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13296                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
13297                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
13298                         num_of_dest++;
13299                         break;
13300                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
13301                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
13302                         if (flow_dv_convert_action_modify_mac
13303                                         (mhdr_res, actions, error))
13304                                 return -rte_errno;
13305                         action_flags |= actions->type ==
13306                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
13307                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
13308                                         MLX5_FLOW_ACTION_SET_MAC_DST;
13309                         break;
13310                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
13311                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
13312                         if (flow_dv_convert_action_modify_ipv4
13313                                         (mhdr_res, actions, error))
13314                                 return -rte_errno;
13315                         action_flags |= actions->type ==
13316                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
13317                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
13318                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
13319                         break;
13320                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
13321                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
13322                         if (flow_dv_convert_action_modify_ipv6
13323                                         (mhdr_res, actions, error))
13324                                 return -rte_errno;
13325                         action_flags |= actions->type ==
13326                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
13327                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
13328                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
13329                         break;
13330                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
13331                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
13332                         if (flow_dv_convert_action_modify_tp
13333                                         (mhdr_res, actions, items,
13334                                          &flow_attr, dev_flow, !!(action_flags &
13335                                          MLX5_FLOW_ACTION_DECAP), error))
13336                                 return -rte_errno;
13337                         action_flags |= actions->type ==
13338                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
13339                                         MLX5_FLOW_ACTION_SET_TP_SRC :
13340                                         MLX5_FLOW_ACTION_SET_TP_DST;
13341                         break;
13342                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13343                         if (flow_dv_convert_action_modify_dec_ttl
13344                                         (mhdr_res, items, &flow_attr, dev_flow,
13345                                          !!(action_flags &
13346                                          MLX5_FLOW_ACTION_DECAP), error))
13347                                 return -rte_errno;
13348                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13349                         break;
13350                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
13351                         if (flow_dv_convert_action_modify_ttl
13352                                         (mhdr_res, actions, items, &flow_attr,
13353                                          dev_flow, !!(action_flags &
13354                                          MLX5_FLOW_ACTION_DECAP), error))
13355                                 return -rte_errno;
13356                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13357                         break;
13358                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13359                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13360                         if (flow_dv_convert_action_modify_tcp_seq
13361                                         (mhdr_res, actions, error))
13362                                 return -rte_errno;
13363                         action_flags |= actions->type ==
13364                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13365                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
13366                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13367                         break;
13368
13369                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13370                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13371                         if (flow_dv_convert_action_modify_tcp_ack
13372                                         (mhdr_res, actions, error))
13373                                 return -rte_errno;
13374                         action_flags |= actions->type ==
13375                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13376                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
13377                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
13378                         break;
13379                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13380                         if (flow_dv_convert_action_set_reg
13381                                         (mhdr_res, actions, error))
13382                                 return -rte_errno;
13383                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13384                         break;
13385                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13386                         if (flow_dv_convert_action_copy_mreg
13387                                         (dev, mhdr_res, actions, error))
13388                                 return -rte_errno;
13389                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13390                         break;
13391                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13392                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13393                         dev_flow->handle->fate_action =
13394                                         MLX5_FLOW_FATE_DEFAULT_MISS;
13395                         break;
13396                 case RTE_FLOW_ACTION_TYPE_METER:
13397                         if (!wks->fm)
13398                                 return rte_flow_error_set(error, rte_errno,
13399                                         RTE_FLOW_ERROR_TYPE_ACTION,
13400                                         NULL, "Failed to get meter in flow.");
13401                         /* Set the meter action. */
13402                         dev_flow->dv.actions[actions_n++] =
13403                                 wks->fm->meter_action;
13404                         action_flags |= MLX5_FLOW_ACTION_METER;
13405                         break;
13406                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13407                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13408                                                               actions, error))
13409                                 return -rte_errno;
13410                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13411                         break;
13412                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13413                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13414                                                               actions, error))
13415                                 return -rte_errno;
13416                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13417                         break;
13418                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13419                         sample_act_pos = actions_n;
13420                         sample = (const struct rte_flow_action_sample *)
13421                                  action->conf;
13422                         actions_n++;
13423                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13424                         /* Put the encap action into the group if working with port ID. */
13425                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13426                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13427                                 sample_act->action_flags |=
13428                                                         MLX5_FLOW_ACTION_ENCAP;
13429                         break;
13430                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13431                         if (flow_dv_convert_action_modify_field
13432                                         (dev, mhdr_res, actions, attr, error))
13433                                 return -rte_errno;
13434                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13435                         break;
13436                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13437                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13438                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13439                         if (!ct)
13440                                 return rte_flow_error_set(error, EINVAL,
13441                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13442                                                 NULL,
13443                                                 "Failed to get CT object.");
13444                         if (mlx5_aso_ct_available(priv->sh, ct))
13445                                 return rte_flow_error_set(error, rte_errno,
13446                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13447                                                 NULL,
13448                                                 "CT is unavailable.");
13449                         if (ct->is_original)
13450                                 dev_flow->dv.actions[actions_n] =
13451                                                         ct->dr_action_orig;
13452                         else
13453                                 dev_flow->dv.actions[actions_n] =
13454                                                         ct->dr_action_rply;
13455                         if (flow->ct == 0) {
13456                                 flow->indirect_type =
13457                                                 MLX5_INDIRECT_ACTION_TYPE_CT;
13458                                 flow->ct = owner_idx;
13459                                 __atomic_fetch_add(&ct->refcnt, 1,
13460                                                    __ATOMIC_RELAXED);
13461                         }
13462                         actions_n++;
13463                         action_flags |= MLX5_FLOW_ACTION_CT;
13464                         break;
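                      /*
                       * END: register the accumulated modify-header commands
                       * and resolve the deferred AGE/COUNT actions before
                       * leaving the translation loop.
                       */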
13465                 case RTE_FLOW_ACTION_TYPE_END:
13466                         actions_end = true;
13467                         if (mhdr_res->actions_num) {
13468                                 /* Create the modify header action if needed. */
13469                                 if (flow_dv_modify_hdr_resource_register
13470                                         (dev, mhdr_res, dev_flow, error))
13471                                         return -rte_errno;
13472                                 dev_flow->dv.actions[modify_action_position] =
13473                                         handle->dvh.modify_hdr->action;
13474                         }
13475                         /*
13476                          * Handle the AGE and COUNT actions with a single HW
13477                          * counter when they are not shared.
13478                          */
13479                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
13480                                 if ((non_shared_age && count) ||
13481                                     !flow_hit_aso_supported(priv->sh, attr)) {
13482                                         /* Creates age by counters. */
13483                                         cnt_act = flow_dv_prepare_counter
13484                                                                 (dev, dev_flow,
13485                                                                  flow, count,
13486                                                                  non_shared_age,
13487                                                                  error);
13488                                         if (!cnt_act)
13489                                                 return -rte_errno;
13490                                         dev_flow->dv.actions[age_act_pos] =
13491                                                                 cnt_act->action;
13492                                         break;
13493                                 }
13494                                 if (!flow->age && non_shared_age) {
13495                                         flow->age = flow_dv_aso_age_alloc
13496                                                                 (dev, error);
13497                                         if (!flow->age)
13498                                                 return -rte_errno;
13499                                         flow_dv_aso_age_params_init
13500                                                     (dev, flow->age,
13501                                                      non_shared_age->context ?
13502                                                      non_shared_age->context :
13503                                                      (void *)(uintptr_t)
13504                                                      (dev_flow->flow_idx),
13505                                                      non_shared_age->timeout);
13506                                 }
13507                                 age_act = flow_aso_age_get_by_idx(dev,
13508                                                                   flow->age);
13509                                 dev_flow->dv.actions[age_act_pos] =
13510                                                              age_act->dr_action;
13511                         }
13512                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13513                                 /*
13514                                  * Create one count action, to be used
13515                                  * by all sub-flows.
13516                                  */
13517                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13518                                                                   flow, count,
13519                                                                   NULL, error);
13520                                 if (!cnt_act)
13521                                         return -rte_errno;
13522                                 dev_flow->dv.actions[actions_n++] =
13523                                                                 cnt_act->action;
13524                         }
13525                 default:
13526                         break;
13527                 }
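                      /*
                       * Reserve one slot in the DV action array for the merged
                       * modify-header action when its first command is queued;
                       * the slot is filled in at RTE_FLOW_ACTION_TYPE_END.
                       */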
13528                 if (mhdr_res->actions_num &&
13529                     modify_action_position == UINT32_MAX)
13530                         modify_action_position = actions_n++;
13531         }
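              /*
               * Translate the pattern items into the matcher mask and value
               * buffers. Tunnel items such as GRE, VXLAN-GPE and GENEVE are
               * only recorded here and translated after the loop, once the
               * full item_flags layout is known.
               */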
13532         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13533                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13534                 int item_type = items->type;
13535
13536                 if (!mlx5_flow_os_item_supported(item_type))
13537                         return rte_flow_error_set(error, ENOTSUP,
13538                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13539                                                   NULL, "item not supported");
13540                 switch (item_type) {
13541                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13542                         flow_dv_translate_item_port_id
13543                                 (dev, match_mask, match_value, items, attr);
13544                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13545                         break;
13546                 case RTE_FLOW_ITEM_TYPE_ETH:
13547                         flow_dv_translate_item_eth(match_mask, match_value,
13548                                                    items, tunnel,
13549                                                    dev_flow->dv.group);
13550                         matcher.priority = action_flags &
13551                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13552                                         !dev_flow->external ?
13553                                         MLX5_PRIORITY_MAP_L3 :
13554                                         MLX5_PRIORITY_MAP_L2;
13555                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13556                                              MLX5_FLOW_LAYER_OUTER_L2;
13557                         break;
13558                 case RTE_FLOW_ITEM_TYPE_VLAN:
13559                         flow_dv_translate_item_vlan(dev_flow,
13560                                                     match_mask, match_value,
13561                                                     items, tunnel,
13562                                                     dev_flow->dv.group);
13563                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13564                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13565                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13566                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13567                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13568                         break;
13569                 case RTE_FLOW_ITEM_TYPE_IPV4:
13570                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13571                                                   &item_flags, &tunnel);
13572                         flow_dv_translate_item_ipv4(match_mask, match_value,
13573                                                     items, tunnel,
13574                                                     dev_flow->dv.group);
13575                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13576                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13577                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13578                         if (items->mask != NULL &&
13579                             ((const struct rte_flow_item_ipv4 *)
13580                              items->mask)->hdr.next_proto_id) {
13581                                 next_protocol =
13582                                         ((const struct rte_flow_item_ipv4 *)
13583                                          (items->spec))->hdr.next_proto_id;
13584                                 next_protocol &=
13585                                         ((const struct rte_flow_item_ipv4 *)
13586                                          (items->mask))->hdr.next_proto_id;
13587                         } else {
13588                                 /* Reset for inner layer. */
13589                                 next_protocol = 0xff;
13590                         }
13591                         break;
13592                 case RTE_FLOW_ITEM_TYPE_IPV6:
13593                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13594                                                   &item_flags, &tunnel);
13595                         flow_dv_translate_item_ipv6(match_mask, match_value,
13596                                                     items, tunnel,
13597                                                     dev_flow->dv.group);
13598                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13599                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13600                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13601                         if (items->mask != NULL &&
13602                             ((const struct rte_flow_item_ipv6 *)
13603                              items->mask)->hdr.proto) {
13604                                 next_protocol =
13605                                         ((const struct rte_flow_item_ipv6 *)
13606                                          items->spec)->hdr.proto;
13607                                 next_protocol &=
13608                                         ((const struct rte_flow_item_ipv6 *)
13609                                          items->mask)->hdr.proto;
13610                         } else {
13611                                 /* Reset for inner layer. */
13612                                 next_protocol = 0xff;
13613                         }
13614                         break;
13615                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13616                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13617                                                              match_value,
13618                                                              items, tunnel);
13619                         last_item = tunnel ?
13620                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13621                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13622                         if (items->mask != NULL &&
13623                             ((const struct rte_flow_item_ipv6_frag_ext *)
13624                              items->mask)->hdr.next_header) {
13625                                 next_protocol =
13626                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13627                                  items->spec)->hdr.next_header;
13628                                 next_protocol &=
13629                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13630                                  items->mask)->hdr.next_header;
13631                         } else {
13632                                 /* Reset for inner layer. */
13633                                 next_protocol = 0xff;
13634                         }
13635                         break;
13636                 case RTE_FLOW_ITEM_TYPE_TCP:
13637                         flow_dv_translate_item_tcp(match_mask, match_value,
13638                                                    items, tunnel);
13639                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13640                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13641                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13642                         break;
13643                 case RTE_FLOW_ITEM_TYPE_UDP:
13644                         flow_dv_translate_item_udp(match_mask, match_value,
13645                                                    items, tunnel);
13646                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13647                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13648                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13649                         break;
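                      /*
                       * GRE matching is deferred: only record the item here
                       * and translate it after the loop, once the complete
                       * item_flags set is known.
                       */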
13650                 case RTE_FLOW_ITEM_TYPE_GRE:
13651                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13652                         last_item = MLX5_FLOW_LAYER_GRE;
13653                         tunnel_item = items;
13654                         gre_item = items;
13655                         break;
13656                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13657                         flow_dv_translate_item_gre_key(match_mask,
13658                                                        match_value, items);
13659                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13660                         break;
13661                 case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
13662                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13663                         last_item = MLX5_FLOW_LAYER_GRE;
13664                         tunnel_item = items;
13665                         break;
13666                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13667                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13668                         last_item = MLX5_FLOW_LAYER_GRE;
13669                         tunnel_item = items;
13670                         break;
13671                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13672                         flow_dv_translate_item_vxlan(dev, attr,
13673                                                      match_mask, match_value,
13674                                                      items, tunnel);
13675                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13676                         last_item = MLX5_FLOW_LAYER_VXLAN;
13677                         break;
13678                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13679                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13680                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13681                         tunnel_item = items;
13682                         break;
13683                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13684                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13685                         last_item = MLX5_FLOW_LAYER_GENEVE;
13686                         tunnel_item = items;
13687                         break;
13688                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13689                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13690                                                           match_value,
13691                                                           items, error);
13692                         if (ret)
13693                                 return rte_flow_error_set(error, -ret,
13694                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13695                                         "cannot create GENEVE TLV option");
13696                         flow->geneve_tlv_option = 1;
13697                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13698                         break;
13699                 case RTE_FLOW_ITEM_TYPE_MPLS:
13700                         flow_dv_translate_item_mpls(match_mask, match_value,
13701                                                     items, last_item, tunnel);
13702                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13703                         last_item = MLX5_FLOW_LAYER_MPLS;
13704                         break;
13705                 case RTE_FLOW_ITEM_TYPE_MARK:
13706                         flow_dv_translate_item_mark(dev, match_mask,
13707                                                     match_value, items);
13708                         last_item = MLX5_FLOW_ITEM_MARK;
13709                         break;
13710                 case RTE_FLOW_ITEM_TYPE_META:
13711                         flow_dv_translate_item_meta(dev, match_mask,
13712                                                     match_value, attr, items);
13713                         last_item = MLX5_FLOW_ITEM_METADATA;
13714                         break;
13715                 case RTE_FLOW_ITEM_TYPE_ICMP:
13716                         flow_dv_translate_item_icmp(match_mask, match_value,
13717                                                     items, tunnel);
13718                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13719                         last_item = MLX5_FLOW_LAYER_ICMP;
13720                         break;
13721                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13722                         flow_dv_translate_item_icmp6(match_mask, match_value,
13723                                                       items, tunnel);
13724                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13725                         last_item = MLX5_FLOW_LAYER_ICMP6;
13726                         break;
13727                 case RTE_FLOW_ITEM_TYPE_TAG:
13728                         flow_dv_translate_item_tag(dev, match_mask,
13729                                                    match_value, items);
13730                         last_item = MLX5_FLOW_ITEM_TAG;
13731                         break;
13732                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13733                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13734                                                         match_value, items);
13735                         last_item = MLX5_FLOW_ITEM_TAG;
13736                         break;
13737                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13738                         flow_dv_translate_item_tx_queue(dev, match_mask,
13739                                                         match_value,
13740                                                         items);
13741                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13742                         break;
13743                 case RTE_FLOW_ITEM_TYPE_GTP:
13744                         flow_dv_translate_item_gtp(match_mask, match_value,
13745                                                    items, tunnel);
13746                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13747                         last_item = MLX5_FLOW_LAYER_GTP;
13748                         break;
13749                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13750                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13751                                                           match_value,
13752                                                           items);
13753                         if (ret)
13754                                 return rte_flow_error_set(error, -ret,
13755                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13756                                         "cannot create GTP PSC item");
13757                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13758                         break;
13759                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13760                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13761                                 /* Create it only the first time it is used. */
13762                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13763                                 if (ret)
13764                                         return rte_flow_error_set
13765                                                 (error, -ret,
13766                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13767                                                 NULL,
13768                                                 "cannot create eCPRI parser");
13769                         }
13770                         flow_dv_translate_item_ecpri(dev, match_mask,
13771                                                      match_value, items,
13772                                                      last_item);
13773                         /* No other protocol should follow eCPRI layer. */
13774                         last_item = MLX5_FLOW_LAYER_ECPRI;
13775                         break;
13776                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13777                         flow_dv_translate_item_integrity(items, integrity_items,
13778                                                          &last_item);
13779                         break;
13780                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13781                         flow_dv_translate_item_aso_ct(dev, match_mask,
13782                                                       match_value, items);
13783                         break;
13784                 case RTE_FLOW_ITEM_TYPE_FLEX:
13785                         flow_dv_translate_item_flex(dev, match_mask,
13786                                                     match_value, items,
13787                                                     dev_flow, tunnel != 0);
13788                         last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
13789                                     MLX5_FLOW_ITEM_OUTER_FLEX;
13790                         break;
13791                 default:
13792                         break;
13793                 }
13794                 item_flags |= last_item;
13795         }
13796         /*
13797          * When E-Switch mode is enabled, there are two cases where the
13798          * source port must be set manually.
13799          * The first is a NIC ingress steering rule, and the second is an
13800          * E-Switch rule where no port_id item was found.
13801          * In both cases the source port is set according to the current
13802          * port in use.
13803          */
13804         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && priv->sh->esw_mode &&
13805             !(attr->egress && !attr->transfer)) {
13806                 if (flow_dv_translate_item_port_id(dev, match_mask,
13807                                                    match_value, NULL, attr))
13808                         return -rte_errno;
13809         }
13810         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
13811                 flow_dv_translate_item_integrity_post(match_mask, match_value,
13812                                                       integrity_items,
13813                                                       item_flags);
13814         }
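              /*
               * Translate the recorded tunnel item last, now that item_flags
               * describes the complete protocol stack of the pattern.
               */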
13815         if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
13816                 flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
13817                                                  tunnel_item, item_flags);
13818         else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
13819                 flow_dv_translate_item_geneve(match_mask, match_value,
13820                                               tunnel_item, item_flags);
13821         else if (item_flags & MLX5_FLOW_LAYER_GRE) {
13822                 if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
13823                         flow_dv_translate_item_gre(match_mask, match_value,
13824                                                    tunnel_item, item_flags);
13825                 else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
13826                         flow_dv_translate_item_nvgre(match_mask, match_value,
13827                                                      tunnel_item, item_flags);
13828                 else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION)
13829                         flow_dv_translate_item_gre_option(match_mask, match_value,
13830                                         tunnel_item, gre_item, item_flags);
13831                 else
13832                         MLX5_ASSERT(false);
13833         }
13834 #ifdef RTE_LIBRTE_MLX5_DEBUG
13835         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13836                                               dev_flow->dv.value.buf));
13837 #endif
13838         /*
13839          * Layers may already be initialized from the prefix flow if this
13840          * dev_flow is the suffix flow.
13841          */
13842         handle->layers |= item_flags;
13843         if (action_flags & MLX5_FLOW_ACTION_RSS)
13844                 flow_dv_hashfields_set(dev_flow->handle->layers,
13845                                        rss_desc,
13846                                        &dev_flow->hash_fields);
13847         /* If the sample action contains an RSS action, the Sample/Mirror
13848          * resource should be registered after the hash field is updated.
13849          */
13850         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13851                 ret = flow_dv_translate_action_sample(dev,
13852                                                       sample,
13853                                                       dev_flow, attr,
13854                                                       &num_of_dest,
13855                                                       sample_actions,
13856                                                       &sample_res,
13857                                                       error);
13858                 if (ret < 0)
13859                         return ret;
13860                 ret = flow_dv_create_action_sample(dev,
13861                                                    dev_flow,
13862                                                    num_of_dest,
13863                                                    &sample_res,
13864                                                    &mdest_res,
13865                                                    sample_actions,
13866                                                    action_flags,
13867                                                    error);
13868                 if (ret < 0)
13869                         return rte_flow_error_set
13870                                                 (error, rte_errno,
13871                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13872                                                 NULL,
13873                                                 "cannot create sample action");
13874                 if (num_of_dest > 1) {
13875                         dev_flow->dv.actions[sample_act_pos] =
13876                         dev_flow->dv.dest_array_res->action;
13877                 } else {
13878                         dev_flow->dv.actions[sample_act_pos] =
13879                         dev_flow->dv.sample_res->verbs_action;
13880                 }
13881         }
13882         /*
13883          * For multiple destinations (sample action with ratio=1), the
13884          * encap action and port_id action are combined into a group
13885          * action, so the original actions must be removed from the flow
13886          * and only the combined sample action used instead.
13887          */
13888         if (num_of_dest > 1 &&
13889             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13890                 int i;
13891                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13892
13893                 for (i = 0; i < actions_n; i++) {
13894                         if ((sample_act->dr_encap_action &&
13895                                 sample_act->dr_encap_action ==
13896                                 dev_flow->dv.actions[i]) ||
13897                                 (sample_act->dr_port_id_action &&
13898                                 sample_act->dr_port_id_action ==
13899                                 dev_flow->dv.actions[i]) ||
13900                                 (sample_act->dr_jump_action &&
13901                                 sample_act->dr_jump_action ==
13902                                 dev_flow->dv.actions[i]))
13903                                 continue;
13904                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13905                 }
13906                 memcpy((void *)dev_flow->dv.actions,
13907                                 (void *)temp_actions,
13908                                 tmp_actions_n * sizeof(void *));
13909                 actions_n = tmp_actions_n;
13910         }
13911         dev_flow->dv.actions_n = actions_n;
13912         dev_flow->act_flags = action_flags;
13913         if (wks->skip_matcher_reg)
13914                 return 0;
13915         /* Register matcher. */
13916         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13917                                     matcher.mask.size);
13918         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13919                                                      matcher.priority,
13920                                                      dev_flow->external);
13921         /*
13922          * When creating a meter drop flow in the drop table with the
13923          * original 5-tuple match, the matcher priority must be lower
13924          * than that of the mtr_id matcher.
13925          */
13926         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13927             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13928             matcher.priority <= MLX5_REG_BITS)
13929                 matcher.priority += MLX5_REG_BITS;
13930         /* The reserved field does not need to be set to 0 here. */
13931         tbl_key.is_fdb = attr->transfer;
13932         tbl_key.is_egress = attr->egress;
13933         tbl_key.level = dev_flow->dv.group;
13934         tbl_key.id = dev_flow->dv.table_id;
13935         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13936                                      tunnel, attr->group, error))
13937                 return -rte_errno;
13938         return 0;
13939 }
13940
13941 /**
13942  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13943  * The IBV_RX_HASH_INNER bit is ignored when selecting the slot.
13944  *
13945  * @param[in, out] action
13946  *   Shared RSS action holding hash RX queue objects.
13947  * @param[in] hash_fields
13948  *   Defines combination of packet fields to participate in RX hash.
13949  * @param[in] hrxq_idx
13950  *   Hash RX queue index to set.
13953  *
13954  * @return
13955  *   0 on success, otherwise negative errno value.
13956  */
13957 static int
13958 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13959                               const uint64_t hash_fields,
13960                               uint32_t hrxq_idx)
13961 {
13962         uint32_t *hrxqs = action->hrxq;
13963
13964         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13965         case MLX5_RSS_HASH_IPV4:
13966                 /* fall-through. */
13967         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13968                 /* fall-through. */
13969         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13970                 hrxqs[0] = hrxq_idx;
13971                 return 0;
13972         case MLX5_RSS_HASH_IPV4_TCP:
13973                 /* fall-through. */
13974         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13975                 /* fall-through. */
13976         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13977                 hrxqs[1] = hrxq_idx;
13978                 return 0;
13979         case MLX5_RSS_HASH_IPV4_UDP:
13980                 /* fall-through. */
13981         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13982                 /* fall-through. */
13983         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13984                 hrxqs[2] = hrxq_idx;
13985                 return 0;
13986         case MLX5_RSS_HASH_IPV6:
13987                 /* fall-through. */
13988         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13989                 /* fall-through. */
13990         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13991                 hrxqs[3] = hrxq_idx;
13992                 return 0;
13993         case MLX5_RSS_HASH_IPV6_TCP:
13994                 /* fall-through. */
13995         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13996                 /* fall-through. */
13997         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13998                 hrxqs[4] = hrxq_idx;
13999                 return 0;
14000         case MLX5_RSS_HASH_IPV6_UDP:
14001                 /* fall-through. */
14002         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
14003                 /* fall-through. */
14004         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
14005                 hrxqs[5] = hrxq_idx;
14006                 return 0;
14007         case MLX5_RSS_HASH_NONE:
14008                 hrxqs[6] = hrxq_idx;
14009                 return 0;
14010         default:
14011                 return -1;
14012         }
14013 }
14014
14015 /**
14016  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
14017  * The IBV_RX_HASH_INNER bit is ignored when selecting the slot.
14018  *
14019  * @param[in] dev
14020  *   Pointer to the Ethernet device structure.
14021  * @param[in] idx
14022  *   Shared RSS action ID holding hash RX queue objects.
14023  * @param[in] hash_fields
14024  *   Defines combination of packet fields to participate in RX hash.
14027  *
14028  * @return
14029  *   Valid hash RX queue index, otherwise 0.
14030  */
14031 uint32_t
14032 flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
14033                                const uint64_t hash_fields)
14034 {
14035         struct mlx5_priv *priv = dev->data->dev_private;
14036         struct mlx5_shared_action_rss *shared_rss =
14037             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14038         const uint32_t *hrxqs = shared_rss->hrxq;
14039
14040         switch (hash_fields & ~IBV_RX_HASH_INNER) {
14041         case MLX5_RSS_HASH_IPV4:
14042                 /* fall-through. */
14043         case MLX5_RSS_HASH_IPV4_DST_ONLY:
14044                 /* fall-through. */
14045         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
14046                 return hrxqs[0];
14047         case MLX5_RSS_HASH_IPV4_TCP:
14048                 /* fall-through. */
14049         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
14050                 /* fall-through. */
14051         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
14052                 return hrxqs[1];
14053         case MLX5_RSS_HASH_IPV4_UDP:
14054                 /* fall-through. */
14055         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
14056                 /* fall-through. */
14057         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
14058                 return hrxqs[2];
14059         case MLX5_RSS_HASH_IPV6:
14060                 /* fall-through. */
14061         case MLX5_RSS_HASH_IPV6_DST_ONLY:
14062                 /* fall-through. */
14063         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
14064                 return hrxqs[3];
14065         case MLX5_RSS_HASH_IPV6_TCP:
14066                 /* fall-through. */
14067         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
14068                 /* fall-through. */
14069         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
14070                 return hrxqs[4];
14071         case MLX5_RSS_HASH_IPV6_UDP:
14072                 /* fall-through. */
14073         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
14074                 /* fall-through. */
14075         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
14076                 return hrxqs[5];
14077         case MLX5_RSS_HASH_NONE:
14078                 return hrxqs[6];
14079         default:
14080                 return 0;
14081         }
14083 }
14084
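/*
 * Illustrative note (not part of the upstream code): the set/lookup pair
 * above collapses every supported hash-fields combination into a fixed
 * seven-slot array inside the shared RSS action, so a round trip through
 * both helpers is stable. A minimal sketch, assuming the MLX5_RSS_HASH_*
 * macros from mlx5_flow.h:
 *
 *	struct mlx5_shared_action_rss a = { 0 };
 *
 *	__flow_dv_action_rss_hrxq_set(&a, MLX5_RSS_HASH_IPV4_TCP, 42);
 *	MLX5_ASSERT(a.hrxq[1] == 42);
 *
 * The SRC_ONLY/DST_ONLY variants intentionally land in the same slot as
 * their base combination, since only one of them can be active at a time.
 */
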
14085 /**
14086  * Apply the flow to the NIC, lock free,
14087  * (mutex should be acquired by caller).
14088  *
14089  * @param[in] dev
14090  *   Pointer to the Ethernet device structure.
14091  * @param[in, out] flow
14092  *   Pointer to flow structure.
14093  * @param[out] error
14094  *   Pointer to error structure.
14095  *
14096  * @return
14097  *   0 on success, a negative errno value otherwise and rte_errno is set.
14098  */
14099 static int
14100 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
14101               struct rte_flow_error *error)
14102 {
14103         struct mlx5_flow_dv_workspace *dv;
14104         struct mlx5_flow_handle *dh;
14105         struct mlx5_flow_handle_dv *dv_h;
14106         struct mlx5_flow *dev_flow;
14107         struct mlx5_priv *priv = dev->data->dev_private;
14108         uint32_t handle_idx;
14109         int n;
14110         int err;
14111         int idx;
14112         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
14113         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
14114         uint8_t misc_mask;
14115
14116         MLX5_ASSERT(wks);
14117         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
14118                 dev_flow = &wks->flows[idx];
14119                 dv = &dev_flow->dv;
14120                 dh = dev_flow->handle;
14121                 dv_h = &dh->dvh;
14122                 n = dv->actions_n;
14123                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
14124                         if (dv->transfer) {
14125                                 MLX5_ASSERT(priv->sh->dr_drop_action);
14126                                 dv->actions[n++] = priv->sh->dr_drop_action;
14127                         } else {
14128 #ifdef HAVE_MLX5DV_DR
14129                                 /* DR supports drop action placeholder. */
14130                                 MLX5_ASSERT(priv->sh->dr_drop_action);
14131                                 dv->actions[n++] = dv->group ?
14132                                         priv->sh->dr_drop_action :
14133                                         priv->root_drop_action;
14134 #else
14135                                 /* For DV we use the explicit drop queue. */
14136                                 MLX5_ASSERT(priv->drop_queue.hrxq);
14137                                 dv->actions[n++] =
14138                                                 priv->drop_queue.hrxq->action;
14139 #endif
14140                         }
14141                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
14142                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
14143                         struct mlx5_hrxq *hrxq;
14144                         uint32_t hrxq_idx;
14145
14146                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
14147                                                     &hrxq_idx);
14148                         if (!hrxq) {
14149                                 rte_flow_error_set
14150                                         (error, rte_errno,
14151                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14152                                          "cannot get hash queue");
14153                                 goto error;
14154                         }
14155                         dh->rix_hrxq = hrxq_idx;
14156                         dv->actions[n++] = hrxq->action;
14157                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
14158                         struct mlx5_hrxq *hrxq = NULL;
14159                         uint32_t hrxq_idx;
14160
14161                         hrxq_idx = flow_dv_action_rss_hrxq_lookup(dev,
14162                                                 rss_desc->shared_rss,
14163                                                 dev_flow->hash_fields);
14164                         if (hrxq_idx)
14165                                 hrxq = mlx5_ipool_get
14166                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
14167                                          hrxq_idx);
14168                         if (!hrxq) {
14169                                 rte_flow_error_set
14170                                         (error, rte_errno,
14171                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14172                                          "cannot get hash queue");
14173                                 goto error;
14174                         }
14175                         dh->rix_srss = rss_desc->shared_rss;
14176                         dv->actions[n++] = hrxq->action;
14177                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
14178                         if (!priv->sh->default_miss_action) {
14179                                 rte_flow_error_set
14180                                         (error, rte_errno,
14181                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14182                                          "default miss action not created.");
14183                                 goto error;
14184                         }
14185                         dv->actions[n++] = priv->sh->default_miss_action;
14186                 }
14187                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
14188                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
14189                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
14190                                                (void *)&dv->value, n,
14191                                                dv->actions, &dh->drv_flow);
14192                 if (err) {
14193                         rte_flow_error_set
14194                                 (error, errno,
14195                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14196                                 NULL,
14197                                 (!priv->sh->config.allow_duplicate_pattern &&
14198                                 errno == EEXIST) ?
14199                                 "duplicating pattern is not allowed" :
14200                                 "hardware refuses to create flow");
14201                         goto error;
14202                 }
14203                 if (priv->vmwa_context &&
14204                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
14205                         /*
14206                          * The rule contains the VLAN pattern.
14207                          * For a VF we create a VLAN interface to
14208                          * make the hypervisor set the correct
14209                          * e-Switch vport context.
14210                          */
14211                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
14212                 }
14213         }
14214         return 0;
14215 error:
14216         err = rte_errno; /* Save rte_errno before cleanup. */
14217         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
14218                        handle_idx, dh, next) {
14219                 /* hrxq is a union; don't clear it if the flag is not set. */
14220                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
14221                         mlx5_hrxq_release(dev, dh->rix_hrxq);
14222                         dh->rix_hrxq = 0;
14223                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
14224                         dh->rix_srss = 0;
14225                 }
14226                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14227                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14228         }
14229         rte_errno = err; /* Restore rte_errno. */
14230         return -rte_errno;
14231 }
14232
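/*
 * Illustrative note (not part of the upstream code): the fate indices used
 * above overlay a single union in struct mlx5_flow_handle, roughly:
 *
 *	union {
 *		uint32_t rix_hrxq;	/@ MLX5_FLOW_FATE_QUEUE @/
 *		uint32_t rix_jump;	/@ MLX5_FLOW_FATE_JUMP @/
 *		uint32_t rix_srss;	/@ MLX5_FLOW_FATE_SHARED_RSS @/
 *		uint32_t rix_fate;	/@ generic accessor @/
 *	};
 *
 * (field comments written with '@' to keep this block a valid C comment)
 * which is why the error path checks fate_action before touching any
 * member: clearing through the wrong name would corrupt a live index.
 */
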
14233 void
14234 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
14235                           struct mlx5_list_entry *entry)
14236 {
14237         struct mlx5_flow_dv_matcher *resource = container_of(entry,
14238                                                              typeof(*resource),
14239                                                              entry);
14240
14241         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
14242         mlx5_free(resource);
14243 }
14244
14245 /**
14246  * Release the flow matcher.
14247  *
14248  * @param dev
14249  *   Pointer to Ethernet device.
14250  * @param handle
14251  *   Pointer to the mlx5_flow_handle holding the matcher.
14252  *
14253  * @return
14254  *   1 while a reference on it exists, 0 when freed.
14255  */
14256 static int
14257 flow_dv_matcher_release(struct rte_eth_dev *dev,
14258                         struct mlx5_flow_handle *handle)
14259 {
14260         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
14261         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
14262                                                             typeof(*tbl), tbl);
14263         int ret;
14264
14265         MLX5_ASSERT(matcher->matcher_object);
14266         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
14267         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
14268         return ret;
14269 }
14270
14271 void
14272 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14273 {
14274         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14275         struct mlx5_flow_dv_encap_decap_resource *res =
14276                                        container_of(entry, typeof(*res), entry);
14277
14278         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14279         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
14280 }
14281
14282 /**
14283  * Release an encap/decap resource.
14284  *
14285  * @param dev
14286  *   Pointer to Ethernet device.
14287  * @param encap_decap_idx
14288  *   Index of encap decap resource.
14289  *
14290  * @return
14291  *   1 while a reference on it exists, 0 when freed.
14292  */
14293 static int
14294 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
14295                                      uint32_t encap_decap_idx)
14296 {
14297         struct mlx5_priv *priv = dev->data->dev_private;
14298         struct mlx5_flow_dv_encap_decap_resource *resource;
14299
14300         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
14301                                   encap_decap_idx);
14302         if (!resource)
14303                 return 0;
14304         MLX5_ASSERT(resource->action);
14305         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
14306 }
14307
14308 /**
14309  * Release a jump-to-table action resource.
14310  *
14311  * @param dev
14312  *   Pointer to Ethernet device.
14313  * @param rix_jump
14314  *   Index to the jump action resource.
14315  *
14316  * @return
14317  *   1 while a reference on it exists, 0 when freed.
14318  */
14319 static int
14320 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
14321                                   uint32_t rix_jump)
14322 {
14323         struct mlx5_priv *priv = dev->data->dev_private;
14324         struct mlx5_flow_tbl_data_entry *tbl_data;
14325
14326         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
14327                                   rix_jump);
14328         if (!tbl_data)
14329                 return 0;
14330         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
14331 }
14332
14333 void
14334 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14335 {
14336         struct mlx5_flow_dv_modify_hdr_resource *res =
14337                 container_of(entry, typeof(*res), entry);
14338         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14339
14340         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14341         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
14342 }
14343
14344 /**
14345  * Release a modify-header resource.
14346  *
14347  * @param dev
14348  *   Pointer to Ethernet device.
14349  * @param handle
14350  *   Pointer to mlx5_flow_handle.
14351  *
14352  * @return
14353  *   1 while a reference on it exists, 0 when freed.
14354  */
14355 static int
14356 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
14357                                     struct mlx5_flow_handle *handle)
14358 {
14359         struct mlx5_priv *priv = dev->data->dev_private;
14360         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
14361
14362         MLX5_ASSERT(entry->action);
14363         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
14364 }
14365
14366 void
14367 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14368 {
14369         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14370         struct mlx5_flow_dv_port_id_action_resource *resource =
14371                                   container_of(entry, typeof(*resource), entry);
14372
14373         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14374         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14375 }
14376
14377 /**
14378  * Release port ID action resource.
14379  *
14380  * @param dev
14381  *   Pointer to Ethernet device.
14382  * @param port_id
14383  *   Index to the port ID action resource.
14384  *
14385  * @return
14386  *   1 while a reference on it exists, 0 when freed.
14387  */
14388 static int
14389 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14390                                         uint32_t port_id)
14391 {
14392         struct mlx5_priv *priv = dev->data->dev_private;
14393         struct mlx5_flow_dv_port_id_action_resource *resource;
14394
14395         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14396         if (!resource)
14397                 return 0;
14398         MLX5_ASSERT(resource->action);
14399         return mlx5_list_unregister(priv->sh->port_id_action_list,
14400                                     &resource->entry);
14401 }
14402
14403 /**
14404  * Release shared RSS action resource.
14405  *
14406  * @param dev
14407  *   Pointer to Ethernet device.
14408  * @param srss
14409  *   Shared RSS action index.
14410  */
14411 static void
14412 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14413 {
14414         struct mlx5_priv *priv = dev->data->dev_private;
14415         struct mlx5_shared_action_rss *shared_rss;
14416
14417         shared_rss = mlx5_ipool_get
14418                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14419         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14420 }
14421
14422 void
14423 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14424 {
14425         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14426         struct mlx5_flow_dv_push_vlan_action_resource *resource =
14427                         container_of(entry, typeof(*resource), entry);
14428
14429         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14430         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14431 }
14432
14433 /**
14434  * Release push vlan action resource.
14435  *
14436  * @param dev
14437  *   Pointer to Ethernet device.
14438  * @param handle
14439  *   Pointer to mlx5_flow_handle.
14440  *
14441  * @return
14442  *   1 while a reference on it exists, 0 when freed.
14443  */
14444 static int
14445 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14446                                           struct mlx5_flow_handle *handle)
14447 {
14448         struct mlx5_priv *priv = dev->data->dev_private;
14449         struct mlx5_flow_dv_push_vlan_action_resource *resource;
14450         uint32_t idx = handle->dvh.rix_push_vlan;
14451
14452         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14453         if (!resource)
14454                 return 0;
14455         MLX5_ASSERT(resource->action);
14456         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14457                                     &resource->entry);
14458 }
14459
14460 /**
14461  * Release the fate resource.
14462  *
14463  * @param dev
14464  *   Pointer to Ethernet device.
14465  * @param handle
14466  *   Pointer to mlx5_flow_handle.
14467  */
14468 static void
14469 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14470                                struct mlx5_flow_handle *handle)
14471 {
14472         if (!handle->rix_fate)
14473                 return;
14474         switch (handle->fate_action) {
14475         case MLX5_FLOW_FATE_QUEUE:
14476                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14477                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14478                 break;
14479         case MLX5_FLOW_FATE_JUMP:
14480                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14481                 break;
14482         case MLX5_FLOW_FATE_PORT_ID:
14483                 flow_dv_port_id_action_resource_release(dev,
14484                                 handle->rix_port_id_action);
14485                 break;
14486         default:
14487                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14488                 break;
14489         }
14490         handle->rix_fate = 0;
14491 }
14492
14493 void
14494 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14495                          struct mlx5_list_entry *entry)
14496 {
14497         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14498                                                               typeof(*resource),
14499                                                               entry);
14500         struct rte_eth_dev *dev = resource->dev;
14501         struct mlx5_priv *priv = dev->data->dev_private;
14502
14503         if (resource->verbs_action)
14504                 claim_zero(mlx5_flow_os_destroy_flow_action
14505                                                       (resource->verbs_action));
14506         if (resource->normal_path_tbl)
14507                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14508                                              resource->normal_path_tbl);
14509         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14510         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14511         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14512 }
14513
14514 /**
14515  * Release a sample resource.
14516  *
14517  * @param dev
14518  *   Pointer to Ethernet device.
14519  * @param handle
14520  *   Pointer to mlx5_flow_handle.
14521  *
14522  * @return
14523  *   1 while a reference on it exists, 0 when freed.
14524  */
14525 static int
14526 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14527                                      struct mlx5_flow_handle *handle)
14528 {
14529         struct mlx5_priv *priv = dev->data->dev_private;
14530         struct mlx5_flow_dv_sample_resource *resource;
14531
14532         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14533                                   handle->dvh.rix_sample);
14534         if (!resource)
14535                 return 0;
14536         MLX5_ASSERT(resource->verbs_action);
14537         return mlx5_list_unregister(priv->sh->sample_action_list,
14538                                     &resource->entry);
14539 }
14540
14541 void
14542 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14543                              struct mlx5_list_entry *entry)
14544 {
14545         struct mlx5_flow_dv_dest_array_resource *resource =
14546                         container_of(entry, typeof(*resource), entry);
14547         struct rte_eth_dev *dev = resource->dev;
14548         struct mlx5_priv *priv = dev->data->dev_private;
14549         uint32_t i = 0;
14550
14551         MLX5_ASSERT(resource->action);
14552         if (resource->action)
14553                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14554         for (; i < resource->num_of_dest; i++)
14555                 flow_dv_sample_sub_actions_release(dev,
14556                                                    &resource->sample_idx[i]);
14557         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14558         DRV_LOG(DEBUG, "destination array resource %p: removed",
14559                 (void *)resource);
14560 }
14561
14562 /**
14563  * Release a destination array resource.
14564  *
14565  * @param dev
14566  *   Pointer to Ethernet device.
14567  * @param handle
14568  *   Pointer to mlx5_flow_handle.
14569  *
14570  * @return
14571  *   1 while a reference on it exists, 0 when freed.
14572  */
14573 static int
14574 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14575                                     struct mlx5_flow_handle *handle)
14576 {
14577         struct mlx5_priv *priv = dev->data->dev_private;
14578         struct mlx5_flow_dv_dest_array_resource *resource;
14579
14580         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14581                                   handle->dvh.rix_dest_array);
14582         if (!resource)
14583                 return 0;
14584         MLX5_ASSERT(resource->action);
14585         return mlx5_list_unregister(priv->sh->dest_array_list,
14586                                     &resource->entry);
14587 }
14588
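/**
 * Release the GENEVE TLV option resource (shared, reference counted).
 *
 * @param dev
 *   Pointer to Ethernet device.
 */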
14589 static void
14590 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14591 {
14592         struct mlx5_priv *priv = dev->data->dev_private;
14593         struct mlx5_dev_ctx_shared *sh = priv->sh;
14594         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14595                                 sh->geneve_tlv_option_resource;
14596         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14597         if (geneve_opt_resource) {
14598                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14599                                          __ATOMIC_RELAXED))) {
14600                         claim_zero(mlx5_devx_cmd_destroy
14601                                         (geneve_opt_resource->obj));
14602                         mlx5_free(sh->geneve_tlv_option_resource);
14603                         sh->geneve_tlv_option_resource = NULL;
14604                 }
14605         }
14606         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14607 }
14608
14609 /**
14610  * Remove the flow from the NIC but keep it in memory.
14611  * Lock free, (mutex should be acquired by caller).
14612  *
14613  * @param[in] dev
14614  *   Pointer to Ethernet device.
14615  * @param[in, out] flow
14616  *   Pointer to flow structure.
14617  */
14618 static void
14619 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14620 {
14621         struct mlx5_flow_handle *dh;
14622         uint32_t handle_idx;
14623         struct mlx5_priv *priv = dev->data->dev_private;
14624
14625         if (!flow)
14626                 return;
14627         handle_idx = flow->dev_handles;
14628         while (handle_idx) {
14629                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14630                                     handle_idx);
14631                 if (!dh)
14632                         return;
14633                 if (dh->drv_flow) {
14634                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14635                         dh->drv_flow = NULL;
14636                 }
14637                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14638                         flow_dv_fate_resource_release(dev, dh);
14639                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14640                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14641                 handle_idx = dh->next.next;
14642         }
14643 }
14644
14645 /**
14646  * Remove the flow from the NIC and the memory.
14647  * Lock free, (mutex should be acquired by caller).
14648  *
14649  * @param[in] dev
14650  *   Pointer to the Ethernet device structure.
14651  * @param[in, out] flow
14652  *   Pointer to flow structure.
14653  */
14654 static void
14655 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14656 {
14657         struct mlx5_flow_handle *dev_handle;
14658         struct mlx5_priv *priv = dev->data->dev_private;
14659         struct mlx5_flow_meter_info *fm = NULL;
14660         uint32_t srss = 0;
14661
14662         if (!flow)
14663                 return;
14664         flow_dv_remove(dev, flow);
14665         if (flow->counter) {
14666                 flow_dv_counter_free(dev, flow->counter);
14667                 flow->counter = 0;
14668         }
14669         if (flow->meter) {
14670                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14671                 if (fm)
14672                         mlx5_flow_meter_detach(priv, fm);
14673                 flow->meter = 0;
14674         }
14675         /* Keep the current age handling by default. */
14676         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14677                 flow_dv_aso_ct_release(dev, flow->ct, NULL);
14678         else if (flow->age)
14679                 flow_dv_aso_age_release(dev, flow->age);
14680         if (flow->geneve_tlv_option) {
14681                 flow_dv_geneve_tlv_option_resource_release(dev);
14682                 flow->geneve_tlv_option = 0;
14683         }
14684         while (flow->dev_handles) {
14685                 uint32_t tmp_idx = flow->dev_handles;
14686
14687                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14688                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14689                 if (!dev_handle)
14690                         return;
14691                 flow->dev_handles = dev_handle->next.next;
14692                 while (dev_handle->flex_item) {
14693                         int index = rte_bsf32(dev_handle->flex_item);
14694
14695                         mlx5_flex_release_index(dev, index);
14696                         dev_handle->flex_item &= ~(uint8_t)RTE_BIT32(index);
14697                 }
14698                 if (dev_handle->dvh.matcher)
14699                         flow_dv_matcher_release(dev, dev_handle);
14700                 if (dev_handle->dvh.rix_sample)
14701                         flow_dv_sample_resource_release(dev, dev_handle);
14702                 if (dev_handle->dvh.rix_dest_array)
14703                         flow_dv_dest_array_resource_release(dev, dev_handle);
14704                 if (dev_handle->dvh.rix_encap_decap)
14705                         flow_dv_encap_decap_resource_release(dev,
14706                                 dev_handle->dvh.rix_encap_decap);
14707                 if (dev_handle->dvh.modify_hdr)
14708                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14709                 if (dev_handle->dvh.rix_push_vlan)
14710                         flow_dv_push_vlan_action_resource_release(dev,
14711                                                                   dev_handle);
14712                 if (dev_handle->dvh.rix_tag)
14713                         flow_dv_tag_release(dev,
14714                                             dev_handle->dvh.rix_tag);
14715                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14716                         flow_dv_fate_resource_release(dev, dev_handle);
14717                 else if (!srss)
14718                         srss = dev_handle->rix_srss;
14719                 if (fm && dev_handle->is_meter_flow_id &&
14720                     dev_handle->split_flow_id)
14721                         mlx5_ipool_free(fm->flow_ipool,
14722                                         dev_handle->split_flow_id);
14723                 else if (dev_handle->split_flow_id &&
14724                     !dev_handle->is_meter_flow_id)
14725                         mlx5_ipool_free(priv->sh->ipool
14726                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14727                                         dev_handle->split_flow_id);
14728                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14729                            tmp_idx);
14730         }
14731         if (srss)
14732                 flow_dv_shared_rss_action_release(dev, srss);
14733 }
14734
14735 /**
14736  * Release array of hash RX queue objects.
14737  * Helper function.
14738  *
14739  * @param[in] dev
14740  *   Pointer to the Ethernet device structure.
14741  * @param[in, out] hrxqs
14742  *   Array of hash RX queue objects.
14743  *
14744  * @return
14745  *   Total number of references to hash RX queue objects in *hrxqs* array
14746  *   after this operation.
14747  */
14748 static int
14749 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14750                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14751 {
14752         size_t i;
14753         int remaining = 0;
14754
14755         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14756                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14757
14758                 if (!ret)
14759                         (*hrxqs)[i] = 0;
14760                 remaining += ret;
14761         }
14762         return remaining;
14763 }
14764
14765 /**
14766  * Release all hash RX queue objects representing shared RSS action.
14767  *
14768  * @param[in] dev
14769  *   Pointer to the Ethernet device structure.
14770  * @param[in, out] action
14771  *   Shared RSS action to remove hash RX queue objects from.
14772  *
14773  * @return
14774  *   Total number of references to hash RX queue objects stored in *action*
14775  *   after this operation.
14776  *   Expected to be 0 if no external references are held.
14777  */
14778 static int
14779 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14780                                  struct mlx5_shared_action_rss *shared_rss)
14781 {
14782         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14783 }
14784
14785 /**
14786  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
14787  * user input.
14788  *
14789  * Only one hash value is available for one L3+L4 combination:
14790  * for example:
14791  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14792  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they share the
14793  * same slot in mlx5_rss_hash_fields.
14794  *
14795  * @param[in] rss_types
14796  *   RSS type.
14797  * @param[in, out] hash_field
14798  *   The hash_field variable to be adjusted.
14799  *
14800  * @return
14801  *   void
14802  */
14803 void
14804 flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
14805                                    uint64_t *hash_field)
14806 {
14807         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14808         case MLX5_RSS_HASH_IPV4:
14809                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14810                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14811                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14812                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14813                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14814                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14815                         else
14816                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14817                 }
14818                 return;
14819         case MLX5_RSS_HASH_IPV6:
14820                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14821                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14822                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14823                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14824                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14825                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14826                         else
14827                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14828                 }
14829                 return;
14830         case MLX5_RSS_HASH_IPV4_UDP:
14831                 /* fall-through. */
14832         case MLX5_RSS_HASH_IPV6_UDP:
14833                 if (rss_types & RTE_ETH_RSS_UDP) {
14834                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14835                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14836                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14837                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14838                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14839                         else
14840                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14841                 }
14842                 return;
14843         case MLX5_RSS_HASH_IPV4_TCP:
14844                 /* fall-through. */
14845         case MLX5_RSS_HASH_IPV6_TCP:
14846                 if (rss_types & RTE_ETH_RSS_TCP) {
14847                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14848                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14849                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14850                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14851                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14852                         else
14853                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14854                 }
14855                 return;
14856         default:
14857                 return;
14858         }
14859 }
14860
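/*
 * Usage sketch (illustrative, not part of the upstream code), assuming the
 * MLX5_RSS_HASH_* and MLX5_TCP_IBV_RX_HASH definitions from mlx5_flow.h:
 * starting from the generic IPv4/TCP hash, RTE_ETH_RSS_L4_SRC_ONLY narrows
 * the L4 part of the hash to the source port while the L3 part and the
 * slot selection above stay untouched.
 *
 *	uint64_t hf = MLX5_RSS_HASH_IPV4_TCP;
 *
 *	flow_dv_action_rss_l34_hash_adjust(RTE_ETH_RSS_TCP |
 *					   RTE_ETH_RSS_L4_SRC_ONLY, &hf);
 *	MLX5_ASSERT(hf == (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
 *			   IBV_RX_HASH_SRC_PORT_TCP));
 */
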
14861 /**
14862  * Setup shared RSS action.
14863  * Prepare set of hash RX queue objects sufficient to handle all valid
14864  * hash_fields combinations (see enum ibv_rx_hash_fields).
14865  *
14866  * @param[in] dev
14867  *   Pointer to the Ethernet device structure.
14868  * @param[in] action_idx
14869  *   Shared RSS action ipool index.
14870  * @param[in, out] action
14871  *   Partially initialized shared RSS action.
14872  * @param[out] error
14873  *   Perform verbose error reporting if not NULL. Initialized in case of
14874  *   error only.
14875  *
14876  * @return
14877  *   0 on success, otherwise negative errno value.
14878  */
14879 static int
14880 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14881                            uint32_t action_idx,
14882                            struct mlx5_shared_action_rss *shared_rss,
14883                            struct rte_flow_error *error)
14884 {
14885         struct mlx5_priv *priv = dev->data->dev_private;
14886         struct mlx5_flow_rss_desc rss_desc = { 0 };
14887         size_t i;
14888         int err;
14889
14890         shared_rss->ind_tbl = mlx5_ind_table_obj_new
14891                               (dev, shared_rss->origin.queue,
14892                                shared_rss->origin.queue_num,
14893                                true,
14894                                !!dev->data->dev_started);
14895         if (!shared_rss->ind_tbl)
14896                 return rte_flow_error_set(error, rte_errno,
14897                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14898                                           "cannot setup indirection table");
14899         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14900         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14901         rss_desc.const_q = shared_rss->origin.queue;
14902         rss_desc.queue_num = shared_rss->origin.queue_num;
14903         /* Set non-zero value to indicate a shared RSS. */
14904         rss_desc.shared_rss = action_idx;
14905         rss_desc.ind_tbl = shared_rss->ind_tbl;
14906         if (priv->sh->config.dv_flow_en == 2)
14907                 rss_desc.hws_flags = MLX5DR_ACTION_FLAG_HWS_RX;
14908         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14909                 struct mlx5_hrxq *hrxq;
14910                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14911                 int tunnel = 0;
14912
14913                 flow_dv_action_rss_l34_hash_adjust(shared_rss->origin.types,
14914                                                    &hash_fields);
14915                 if (shared_rss->origin.level > 1) {
14916                         hash_fields |= IBV_RX_HASH_INNER;
14917                         tunnel = 1;
14918                 }
14919                 rss_desc.tunnel = tunnel;
14920                 rss_desc.hash_fields = hash_fields;
14921                 hrxq = mlx5_hrxq_get(dev, &rss_desc);
14922                 if (!hrxq) {
14923                         rte_flow_error_set
14924                                 (error, rte_errno,
14925                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14926                                  "cannot get hash queue");
14927                         goto error_hrxq_new;
14928                 }
14929                 err = __flow_dv_action_rss_hrxq_set
14930                         (shared_rss, hash_fields, hrxq->idx);
14931                 MLX5_ASSERT(!err);
14932         }
14933         return 0;
14934 error_hrxq_new:
14935         err = rte_errno;
14936         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14937         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14938                 shared_rss->ind_tbl = NULL;
14939         rte_errno = err;
14940         return -rte_errno;
14941 }
14942
14943 /**
14944  * Create shared RSS action.
14945  *
14946  * @param[in] dev
14947  *   Pointer to the Ethernet device structure.
14948  * @param[in] conf
14949  *   Shared action configuration.
14950  * @param[in] rss
14951  *   RSS action specification used to create shared action.
14952  * @param[out] error
14953  *   Perform verbose error reporting if not NULL. Initialized in case of
14954  *   error only.
14955  *
14956  * @return
14957  *   A valid shared action ID in case of success, 0 otherwise and
14958  *   rte_errno is set.
14959  */
14960 static uint32_t
14961 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14962                             const struct rte_flow_indir_action_conf *conf,
14963                             const struct rte_flow_action_rss *rss,
14964                             struct rte_flow_error *error)
14965 {
14966         struct mlx5_priv *priv = dev->data->dev_private;
14967         struct mlx5_shared_action_rss *shared_rss = NULL;
14968         struct rte_flow_action_rss *origin;
14969         const uint8_t *rss_key;
14970         uint32_t idx;
14971
14972         RTE_SET_USED(conf);
14973         shared_rss = mlx5_ipool_zmalloc
14974                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14975         if (!shared_rss) {
14976                 rte_flow_error_set(error, ENOMEM,
14977                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14978                                    "cannot allocate resource memory");
14979                 goto error_rss_init;
14980         }
14981         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14982                 rte_flow_error_set(error, E2BIG,
14983                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14984                                    "rss action number out of range");
14985                 goto error_rss_init;
14986         }
14987         origin = &shared_rss->origin;
14988         origin->func = rss->func;
14989         origin->level = rss->level;
14990         /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
14991         origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
14992         /* NULL RSS key indicates default RSS key. */
14993         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14994         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14995         origin->key = &shared_rss->key[0];
14996         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14997         origin->queue = rss->queue;
14998         origin->queue_num = rss->queue_num;
14999         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
15000                 goto error_rss_init;
15001         /* Update the queue pointer with the indirection table queue memory. */
15002         origin->queue = shared_rss->ind_tbl->queues;
15003         rte_spinlock_init(&shared_rss->action_rss_sl);
15004         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
15005         rte_spinlock_lock(&priv->shared_act_sl);
15006         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
15007                      &priv->rss_shared_actions, idx, shared_rss, next);
15008         rte_spinlock_unlock(&priv->shared_act_sl);
15009         return idx;
15010 error_rss_init:
15011         if (shared_rss) {
15012                 if (shared_rss->ind_tbl)
15013                         mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
15014                                                    !!dev->data->dev_started);
15015                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
15016                                 idx);
15017         }
15018         return 0;
15019 }
15020
15021 /**
15022  * Destroy the shared RSS action.
15023  * Release related hash RX queue objects.
15024  *
15025  * @param[in] dev
15026  *   Pointer to the Ethernet device structure.
15027  * @param[in] idx
15028  *   The shared RSS action object ID to be removed.
15029  * @param[out] error
15030  *   Perform verbose error reporting if not NULL. Initialized in case of
15031  *   error only.
15032  *
15033  * @return
15034  *   0 on success, otherwise negative errno value.
15035  */
15036 static int
15037 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
15038                              struct rte_flow_error *error)
15039 {
15040         struct mlx5_priv *priv = dev->data->dev_private;
15041         struct mlx5_shared_action_rss *shared_rss =
15042             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
15043         uint32_t old_refcnt = 1;
15044         int remaining;
15045
15046         if (!shared_rss)
15047                 return rte_flow_error_set(error, EINVAL,
15048                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15049                                           "invalid shared action");
15050         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
15051                                          0, 0, __ATOMIC_ACQUIRE,
15052                                          __ATOMIC_RELAXED))
15053                 return rte_flow_error_set(error, EBUSY,
15054                                           RTE_FLOW_ERROR_TYPE_ACTION,
15055                                           NULL,
15056                                           "shared rss has references");
15057         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
15058         if (remaining)
15059                 return rte_flow_error_set(error, EBUSY,
15060                                           RTE_FLOW_ERROR_TYPE_ACTION,
15061                                           NULL,
15062                                           "shared rss hrxq has references");
15063         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
15064                                                !!dev->data->dev_started);
15065         if (remaining)
15066                 return rte_flow_error_set(error, EBUSY,
15067                                           RTE_FLOW_ERROR_TYPE_ACTION,
15068                                           NULL,
15069                                           "shared rss indirection table has"
15070                                           " references");
15071         rte_spinlock_lock(&priv->shared_act_sl);
15072         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
15073                      &priv->rss_shared_actions, idx, shared_rss, next);
15074         rte_spinlock_unlock(&priv->shared_act_sl);
15075         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
15076                         idx);
15077         return 0;
15078 }
15079
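/*
 * Application-side sketch (illustrative, not part of the upstream code):
 * this release path is reached through the generic indirect-action destroy
 * entry point; EBUSY from the checks above surfaces to the caller as a
 * negative return value with rte_errno set.
 *
 *	struct rte_flow_error error;
 *
 *	if (rte_flow_action_handle_destroy(port_id, handle, &error))
 *		printf("destroy failed: %s\n",
 *		       error.message ? error.message : "(none)");
 */
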
15080 /**
15081  * Create indirect action, lock free,
15082  * (mutex should be acquired by caller).
15083  * Dispatcher for action type specific call.
15084  *
15085  * @param[in] dev
15086  *   Pointer to the Ethernet device structure.
15087  * @param[in] conf
15088  *   Shared action configuration.
15089  * @param[in] action
15090  *   Action specification used to create indirect action.
15091  * @param[out] error
15092  *   Perform verbose error reporting if not NULL. Initialized in case of
15093  *   error only.
15094  *
15095  * @return
15096  *   A valid shared action handle in case of success, NULL otherwise and
15097  *   rte_errno is set.
15098  */
15099 struct rte_flow_action_handle *
15100 flow_dv_action_create(struct rte_eth_dev *dev,
15101                       const struct rte_flow_indir_action_conf *conf,
15102                       const struct rte_flow_action *action,
15103                       struct rte_flow_error *err)
15104 {
15105         struct mlx5_priv *priv = dev->data->dev_private;
15106         uint32_t age_idx = 0;
15107         uint32_t idx = 0;
15108         uint32_t ret = 0;
15109
15110         switch (action->type) {
15111         case RTE_FLOW_ACTION_TYPE_RSS:
15112                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
15113                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
15114                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
15115                 break;
15116         case RTE_FLOW_ACTION_TYPE_AGE:
15117         {
15118                 const struct rte_flow_action_age *age = action->conf;
15119
15120                 age_idx = flow_dv_aso_age_alloc(dev, err);
15121                 if (!age_idx) {
15122                         ret = -rte_errno;
15123                         break;
15124                 }
15125                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
15126                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
15127                 flow_dv_aso_age_params_init(dev, age_idx,
15128                                             age->context ? age->context :
15129                                             (void *)(uintptr_t)idx,
15130                                             age->timeout);
15131                 ret = age_idx;
15132                 break;
15133         }
15134         case RTE_FLOW_ACTION_TYPE_COUNT:
15135                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
15136                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
15137                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
15138                 break;
15139         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
15140                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
15141                                                          err);
15142                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
15143                 break;
15144         default:
15145                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
15146                                    NULL, "action type not supported");
15147                 break;
15148         }
15149         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
15150 }
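/*
 * Illustrative application-side sketch (not driver code) of how this
 * dispatcher is reached through the generic rte_flow API; the port id and
 * queue list are assumptions for the example:
 *
 * @code
 * uint16_t port_id = 0;              // assumed port
 * uint16_t queues[2] = { 0, 1 };     // assumed Rx queues
 * struct rte_flow_action_rss rss_conf = {
 *         .queue_num = 2,
 *         .queue = queues,
 * };
 * struct rte_flow_action action = {
 *         .type = RTE_FLOW_ACTION_TYPE_RSS,
 *         .conf = &rss_conf,
 * };
 * struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * struct rte_flow_error error;
 * struct rte_flow_action_handle *handle;
 *
 * handle = rte_flow_action_handle_create(port_id, &conf, &action, &error);
 * @endcode
 *
 * The returned handle is not a pointer into memory: the action type lives
 * in the bits at and above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the object
 * index in the bits below it, as flow_dv_action_destroy() decodes below.
 */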
15151
15152 /**
15153  * Destroy the indirect action.
15154  * Release action related resources on the NIC and the memory.
15155  * Lock free, (mutex should be acquired by caller).
15156  * Dispatcher for action type specific call.
15157  *
15158  * @param[in] dev
15159  *   Pointer to the Ethernet device structure.
15160  * @param[in] handle
15161  *   The indirect action object handle to be removed.
15162  * @param[out] error
15163  *   Perform verbose error reporting if not NULL. Initialized in case of
15164  *   error only.
15165  *
15166  * @return
15167  *   0 on success, otherwise negative errno value.
15168  */
15169 int
15170 flow_dv_action_destroy(struct rte_eth_dev *dev,
15171                        struct rte_flow_action_handle *handle,
15172                        struct rte_flow_error *error)
15173 {
15174         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15175         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15176         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15177         struct mlx5_flow_counter *cnt;
15178         uint32_t no_flow_refcnt = 1;
15179         int ret;
15180
15181         switch (type) {
15182         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15183                 return __flow_dv_action_rss_release(dev, idx, error);
15184         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15185                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
15186                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
15187                                                  &no_flow_refcnt, 1, false,
15188                                                  __ATOMIC_ACQUIRE,
15189                                                  __ATOMIC_RELAXED))
15190                         return rte_flow_error_set(error, EBUSY,
15191                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15192                                                   NULL,
15193                                                   "Indirect count action has references");
15194                 flow_dv_counter_free(dev, idx);
15195                 return 0;
15196         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15197                 ret = flow_dv_aso_age_release(dev, idx);
15198                 if (ret)
15199                         /*
15200                          * In this case, the last flow holding a
15201                          * reference will actually release the age action.
15202                          */
15203                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
15204                                 " released with references %d.", idx, ret);
15205                 return 0;
15206         case MLX5_INDIRECT_ACTION_TYPE_CT:
15207                 ret = flow_dv_aso_ct_release(dev, idx, error);
15208                 if (ret < 0)
15209                         return ret;
15210                 if (ret > 0)
15211                         DRV_LOG(DEBUG, "Connection tracking object %u still "
15212                                 "has references %d.", idx, ret);
15213                 return 0;
15214         default:
15215                 return rte_flow_error_set(error, ENOTSUP,
15216                                           RTE_FLOW_ERROR_TYPE_ACTION,
15217                                           NULL,
15218                                           "action type not supported");
15219         }
15220 }
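/*
 * Matching application-side teardown, illustrative only (port_id, handle
 * and error as in the creation sketch above):
 *
 * @code
 * if (rte_flow_action_handle_destroy(port_id, handle, &error))
 *         printf("destroy failed: %s\n",
 *                error.message ? error.message : "(none)");
 * @endcode
 *
 * An EBUSY failure from the RSS or COUNT branches above means flows still
 * reference the action; the application must destroy those flows first.
 */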
15221
15222 /**
15223  * Update the shared RSS action configuration in place.
15224  *
15225  * @param[in] dev
15226  *   Pointer to the Ethernet device structure.
15227  * @param[in] idx
15228  *   The shared RSS action object ID to be updated.
15229  * @param[in] action_conf
15230  *   RSS action specification used to modify *shared_rss*.
15231  * @param[out] error
15232  *   Perform verbose error reporting if not NULL. Initialized in case of
15233  *   error only.
15234  *
15235  * @return
15236  *   0 on success, otherwise negative errno value.
15237  * @note Currently only the update of RSS queues is supported.
15238  */
15239 static int
15240 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
15241                             const struct rte_flow_action_rss *action_conf,
15242                             struct rte_flow_error *error)
15243 {
15244         struct mlx5_priv *priv = dev->data->dev_private;
15245         struct mlx5_shared_action_rss *shared_rss =
15246             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
15247         int ret = 0;
15248         void *queue = NULL;
15249         void *queue_i = NULL;
15250         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
15251         bool dev_started = !!dev->data->dev_started;
15252
15253         if (!shared_rss)
15254                 return rte_flow_error_set(error, EINVAL,
15255                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15256                                           "invalid shared action to update");
15257         if (priv->obj_ops.ind_table_modify == NULL)
15258                 return rte_flow_error_set(error, ENOTSUP,
15259                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15260                                           "cannot modify indirection table");
15261         queue = mlx5_malloc(MLX5_MEM_ZERO,
15262                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
15263                             0, SOCKET_ID_ANY);
15264         if (!queue)
15265                 return rte_flow_error_set(error, ENOMEM,
15266                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15267                                           NULL,
15268                                           "cannot allocate resource memory");
15269         memcpy(queue, action_conf->queue, queue_size);
15270         MLX5_ASSERT(shared_rss->ind_tbl);
15271         rte_spinlock_lock(&shared_rss->action_rss_sl);
15272         queue_i = shared_rss->ind_tbl->queues;
15273         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
15274                                         queue, action_conf->queue_num,
15275                                         true /* standalone */,
15276                                         dev_started /* ref_new_qs */,
15277                                         dev_started /* deref_old_qs */);
15278         if (ret) {
15279                 ret = rte_flow_error_set(error, rte_errno,
15280                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15281                                           "cannot update indirection table");
15282         } else {
15283                 /* Restore the internal queue pointer and copy in the new queues. */
15284                 memcpy(queue_i, queue, queue_size);
15285                 shared_rss->ind_tbl->queues = queue_i;
15286                 shared_rss->origin.queue_num = action_conf->queue_num;
15287         }
15288         mlx5_free(queue);
15289         rte_spinlock_unlock(&shared_rss->action_rss_sl);
15290         return ret;
15291 }
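/*
 * Illustrative application-side update, per the @note above: only the queue
 * set may change; the new queue list here is an assumption for the example:
 *
 * @code
 * uint16_t new_queues[4] = { 0, 1, 2, 3 };
 * struct rte_flow_action_rss rss_update = {
 *         .queue_num = 4,
 *         .queue = new_queues,
 * };
 * struct rte_flow_action update = {
 *         .type = RTE_FLOW_ACTION_TYPE_RSS,
 *         .conf = &rss_update,
 * };
 *
 * rte_flow_action_handle_update(port_id, handle, &update, &error);
 * @endcode
 */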
15292
15293 /**
15294  * Update the conntrack context or direction in place.
15295  * Context update should be synchronized.
15296  *
15297  * @param[in] dev
15298  *   Pointer to the Ethernet device structure.
15299  * @param[in] idx
15300  *   The conntrack object ID to be updated.
15301  * @param[in] update
15302  *   Pointer to the structure of information to update.
15303  * @param[out] error
15304  *   Perform verbose error reporting if not NULL. Initialized in case of
15305  *   error only.
15306  *
15307  * @return
15308  *   0 on success, otherwise negative errno value.
15309  */
15310 static int
15311 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
15312                            const struct rte_flow_modify_conntrack *update,
15313                            struct rte_flow_error *error)
15314 {
15315         struct mlx5_priv *priv = dev->data->dev_private;
15316         struct mlx5_aso_ct_action *ct;
15317         const struct rte_flow_action_conntrack *new_prf;
15318         int ret = 0;
15319         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15320         uint32_t dev_idx;
15321
15322         if (PORT_ID(priv) != owner)
15323                 return rte_flow_error_set(error, EACCES,
15324                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15325                                           NULL,
15326                                           "CT object owned by another port");
15327         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15328         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15329         if (!ct->refcnt)
15330                 return rte_flow_error_set(error, ENOMEM,
15331                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15332                                           NULL,
15333                                           "CT object is inactive");
15334         new_prf = &update->new_ct;
15335         if (update->direction)
15336                 ct->is_original = !!new_prf->is_original_dir;
15337         if (update->state) {
15338                 /* Only validate the profile when it needs to be updated. */
15339                 ret = mlx5_validate_action_ct(dev, new_prf, error);
15340                 if (ret)
15341                         return ret;
15342                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
15343                 if (ret)
15344                         return rte_flow_error_set(error, EIO,
15345                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15346                                         NULL,
15347                                         "Failed to send CT context update WQE");
15348                 /* Block until ready or a failure. */
15349                 ret = mlx5_aso_ct_available(priv->sh, ct);
15350                 if (ret)
15351                         rte_flow_error_set(error, rte_errno,
15352                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15353                                            NULL,
15354                                            "Timeout to get the CT update");
15355         }
15356         return ret;
15357 }
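/*
 * Illustrative conntrack update through the public API; only the fields
 * flagged in struct rte_flow_modify_conntrack are applied (the direction
 * and/or the state profile, which is re-validated before the ASO WQE is
 * sent). ct_handle is assumed to be an indirect CT action handle:
 *
 * @code
 * struct rte_flow_modify_conntrack ct_update = {
 *         .new_ct = { .is_original_dir = 1 },
 *         .direction = 1,  // update the direction only
 *         .state = 0,      // leave the CT state profile untouched
 * };
 *
 * rte_flow_action_handle_update(port_id, ct_handle, &ct_update, &error);
 * @endcode
 */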
15358
15359 /**
15360  * Update the indirect action configuration in place, lock free,
15361  * (mutex should be acquired by caller).
15362  *
15363  * @param[in] dev
15364  *   Pointer to the Ethernet device structure.
15365  * @param[in] handle
15366  *   The indirect action object handle to be updated.
15367  * @param[in] update
15368  *   Action specification used to modify the action pointed to by *handle*.
15369  *   *update* can be of the same type as the action pointed to by the
15370  *   *handle* argument, or some other structure such as a wrapper, depending
15371  *   on the indirect action type.
15372  * @param[out] error
15373  *   Perform verbose error reporting if not NULL. Initialized in case of
15374  *   error only.
15375  *
15376  * @return
15377  *   0 on success, otherwise negative errno value.
15378  */
15379 int
15380 flow_dv_action_update(struct rte_eth_dev *dev,
15381                         struct rte_flow_action_handle *handle,
15382                         const void *update,
15383                         struct rte_flow_error *err)
15384 {
15385         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15386         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15387         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15388         const void *action_conf;
15389
15390         switch (type) {
15391         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15392                 action_conf = ((const struct rte_flow_action *)update)->conf;
15393                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15394         case MLX5_INDIRECT_ACTION_TYPE_CT:
15395                 return __flow_dv_action_ct_update(dev, idx, update, err);
15396         default:
15397                 return rte_flow_error_set(err, ENOTSUP,
15398                                           RTE_FLOW_ERROR_TYPE_ACTION,
15399                                           NULL,
15400                                           "action type update not supported");
15401         }
15402 }
15403
15404 /**
15405  * Destroy the meter sub policy table rules.
15406  * Lock free, (mutex should be acquired by caller).
15407  *
15408  * @param[in] dev
15409  *   Pointer to Ethernet device.
15410  * @param[in] sub_policy
15411  *   Pointer to meter sub policy table.
15412  */
15413 static void
15414 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15415                              struct mlx5_flow_meter_sub_policy *sub_policy)
15416 {
15417         struct mlx5_priv *priv = dev->data->dev_private;
15418         struct mlx5_flow_tbl_data_entry *tbl;
15419         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15420         struct mlx5_flow_meter_info *next_fm;
15421         struct mlx5_sub_policy_color_rule *color_rule;
15422         void *tmp;
15423         uint32_t i;
15424
15425         for (i = 0; i < RTE_COLORS; i++) {
15426                 next_fm = NULL;
15427                 if (i == RTE_COLOR_GREEN && policy &&
15428                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15429                         next_fm = mlx5_flow_meter_find(priv,
15430                                         policy->act_cnt[i].next_mtr_id, NULL);
15431                 RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15432                                    next_port, tmp) {
15433                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15434                         tbl = container_of(color_rule->matcher->tbl,
15435                                            typeof(*tbl), tbl);
15436                         mlx5_list_unregister(tbl->matchers,
15437                                              &color_rule->matcher->entry);
15438                         TAILQ_REMOVE(&sub_policy->color_rules[i],
15439                                      color_rule, next_port);
15440                         mlx5_free(color_rule);
15441                         if (next_fm)
15442                                 mlx5_flow_meter_detach(priv, next_fm);
15443                 }
15444         }
15445         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15446                 if (sub_policy->rix_hrxq[i]) {
15447                         if (policy && !policy->is_hierarchy)
15448                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15449                         sub_policy->rix_hrxq[i] = 0;
15450                 }
15451                 if (sub_policy->jump_tbl[i]) {
15452                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15453                                                      sub_policy->jump_tbl[i]);
15454                         sub_policy->jump_tbl[i] = NULL;
15455                 }
15456         }
15457         if (sub_policy->tbl_rsc) {
15458                 flow_dv_tbl_resource_release(MLX5_SH(dev),
15459                                              sub_policy->tbl_rsc);
15460                 sub_policy->tbl_rsc = NULL;
15461         }
15462 }
15463
15464 /**
15465  * Destroy policy rules, lock free,
15466  * (mutex should be acquired by caller).
15467  * Iterates all domains and destroys the rules of each sub-policy.
15468  *
15469  * @param[in] dev
15470  *   Pointer to the Ethernet device structure.
15471  * @param[in] mtr_policy
15472  *   Meter policy struct.
15473  */
15474 static void
15475 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15476                              struct mlx5_flow_meter_policy *mtr_policy)
15477 {
15478         uint32_t i, j;
15479         struct mlx5_flow_meter_sub_policy *sub_policy;
15480         uint16_t sub_policy_num;
15481
15482         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15483                 sub_policy_num = (mtr_policy->sub_policy_num >>
15484                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15485                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15486                 for (j = 0; j < sub_policy_num; j++) {
15487                         sub_policy = mtr_policy->sub_policys[i][j];
15488                         if (sub_policy)
15489                                 __flow_dv_destroy_sub_policy_rules(dev,
15490                                                                    sub_policy);
15491                 }
15492         }
15493 }
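/*
 * sub_policy_num packs one small per-domain counter into a single word:
 * domain i occupies MLX5_MTR_SUB_POLICY_NUM_SHIFT bits starting at bit
 * i * MLX5_MTR_SUB_POLICY_NUM_SHIFT, exactly as extracted above. A generic
 * extraction sketch (function and parameter names are illustrative):
 *
 * @code
 * static inline uint32_t
 * sub_policy_num_get(uint32_t packed, int domain, unsigned int shift,
 *                    uint32_t mask)
 * {
 *         // shift = MLX5_MTR_SUB_POLICY_NUM_SHIFT,
 *         // mask  = MLX5_MTR_SUB_POLICY_NUM_MASK
 *         return (packed >> (shift * domain)) & mask;
 * }
 * @endcode
 */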
15494
15495 /**
15496  * Destroy policy action, lock free,
15497  * (mutex should be acquired by caller).
15498  * Dispatcher for action type specific call.
15499  *
15500  * @param[in] dev
15501  *   Pointer to the Ethernet device structure.
15502  * @param[in] mtr_policy
15503  *   Meter policy struct.
15504  */
15505 static void
15506 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15507                       struct mlx5_flow_meter_policy *mtr_policy)
15508 {
15509         struct rte_flow_action *rss_action;
15510         struct mlx5_flow_handle dev_handle;
15511         uint32_t i, j;
15512
15513         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15514                 if (mtr_policy->act_cnt[i].rix_mark) {
15515                         flow_dv_tag_release(dev,
15516                                 mtr_policy->act_cnt[i].rix_mark);
15517                         mtr_policy->act_cnt[i].rix_mark = 0;
15518                 }
15519                 if (mtr_policy->act_cnt[i].modify_hdr) {
15520                         dev_handle.dvh.modify_hdr =
15521                                 mtr_policy->act_cnt[i].modify_hdr;
15522                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15523                 }
15524                 switch (mtr_policy->act_cnt[i].fate_action) {
15525                 case MLX5_FLOW_FATE_SHARED_RSS:
15526                         rss_action = mtr_policy->act_cnt[i].rss;
15527                         mlx5_free(rss_action);
15528                         break;
15529                 case MLX5_FLOW_FATE_PORT_ID:
15530                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15531                                 flow_dv_port_id_action_resource_release(dev,
15532                                 mtr_policy->act_cnt[i].rix_port_id_action);
15533                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15534                         }
15535                         break;
15536                 case MLX5_FLOW_FATE_DROP:
15537                 case MLX5_FLOW_FATE_JUMP:
15538                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15539                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15540                                                 NULL;
15541                         break;
15542                 default:
15543                         /* Queue action: nothing to release. */
15544                         break;
15545                 }
15546         }
15547         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15548                 mtr_policy->dr_drop_action[j] = NULL;
15549 }
15550
15551 /**
15552  * Create policy action per domain, lock free,
15553  * (mutex should be acquired by caller).
15554  * Dispatcher for action type specific call.
15555  *
15556  * @param[in] dev
15557  *   Pointer to the Ethernet device structure.
15558  * @param[in] mtr_policy
15559  *   Meter policy struct.
15560  * @param[in] actions
15561  *   Per-color action specifications used to create the meter actions
15561  *   in the given domain.
15562  * @param[out] error
15563  *   Perform verbose error reporting if not NULL. Initialized in case of
15564  *   error only.
15565  *
15566  * @return
15567  *   0 on success, otherwise negative errno value.
15568  */
15569 static int
15570 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15571                         struct mlx5_flow_meter_policy *mtr_policy,
15572                         const struct rte_flow_action *actions[RTE_COLORS],
15573                         enum mlx5_meter_domain domain,
15574                         struct rte_mtr_error *error)
15575 {
15576         struct mlx5_priv *priv = dev->data->dev_private;
15577         struct rte_flow_error flow_err;
15578         const struct rte_flow_action *act;
15579         uint64_t action_flags;
15580         struct mlx5_flow_handle dh;
15581         struct mlx5_flow dev_flow;
15582         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15583         int i, ret;
15584         uint8_t egress, transfer;
15585         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15586         union {
15587                 struct mlx5_flow_dv_modify_hdr_resource res;
15588                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15589                             sizeof(struct mlx5_modification_cmd) *
15590                             (MLX5_MAX_MODIFY_NUM + 1)];
15591         } mhdr_dummy;
15592         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15593
15594         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15595         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15596         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15597         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15598         memset(&port_id_action, 0,
15599                sizeof(struct mlx5_flow_dv_port_id_action_resource));
15600         memset(mhdr_res, 0, sizeof(*mhdr_res));
15601         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15602                                        (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15603                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15604         dev_flow.handle = &dh;
15605         dev_flow.dv.port_id_action = &port_id_action;
15606         dev_flow.external = true;
15607         for (i = 0; i < RTE_COLORS; i++) {
15608                 if (i < MLX5_MTR_RTE_COLORS)
15609                         act_cnt = &mtr_policy->act_cnt[i];
15610                 /* Skip the color policy actions creation. */
15611                 if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15612                     (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15613                         continue;
15614                 action_flags = 0;
15615                 for (act = actions[i];
15616                      act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15617                         switch (act->type) {
15618                         case RTE_FLOW_ACTION_TYPE_MARK:
15619                         {
15620                                 uint32_t tag_be = mlx5_flow_mark_set
15621                                         (((const struct rte_flow_action_mark *)
15622                                         (act->conf))->id);
15623
15624                                 if (i >= MLX5_MTR_RTE_COLORS)
15625                                         return -rte_mtr_error_set(error,
15626                                           ENOTSUP,
15627                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15628                                           NULL,
15629                                           "cannot create policy "
15630                                           "mark action for this color");
15631                                 if (flow_dv_tag_resource_register(dev, tag_be,
15632                                                   &dev_flow, &flow_err))
15633                                         return -rte_mtr_error_set(error,
15634                                         ENOTSUP,
15635                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15636                                         NULL,
15637                                         "cannot setup policy mark action");
15638                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15639                                 act_cnt->rix_mark =
15640                                         dev_flow.handle->dvh.rix_tag;
15641                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15642                                 mtr_policy->mark = 1;
15643                                 break;
15644                         }
15645                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15646                                 if (i >= MLX5_MTR_RTE_COLORS)
15647                                         return -rte_mtr_error_set(error,
15648                                           ENOTSUP,
15649                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15650                                           NULL,
15651                                           "cannot create policy "
15652                                           "set tag action for this color");
15653                                 if (flow_dv_convert_action_set_tag
15654                                 (dev, mhdr_res,
15655                                 (const struct rte_flow_action_set_tag *)
15656                                 act->conf,  &flow_err))
15657                                         return -rte_mtr_error_set(error,
15658                                         ENOTSUP,
15659                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15660                                         NULL, "cannot convert policy "
15661                                         "set tag action");
15662                                 if (!mhdr_res->actions_num)
15663                                         return -rte_mtr_error_set(error,
15664                                         ENOTSUP,
15665                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15666                                         NULL, "cannot find policy "
15667                                         "set tag action");
15668                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15669                                 break;
15670                         case RTE_FLOW_ACTION_TYPE_DROP:
15671                         {
15672                                 struct mlx5_flow_mtr_mng *mtrmng =
15673                                                 priv->sh->mtrmng;
15674                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15675
15676                                 /*
15677                                  * Create the drop table with
15678                                  * METER DROP level.
15679                                  */
15680                                 if (!mtrmng->drop_tbl[domain]) {
15681                                         mtrmng->drop_tbl[domain] =
15682                                         flow_dv_tbl_resource_get(dev,
15683                                         MLX5_FLOW_TABLE_LEVEL_METER,
15684                                         egress, transfer, false, NULL, 0,
15685                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15686                                         if (!mtrmng->drop_tbl[domain])
15687                                                 return -rte_mtr_error_set
15688                                         (error, ENOTSUP,
15689                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15690                                         NULL,
15691                                         "Failed to create meter drop table");
15692                                 }
15693                                 tbl_data = container_of
15694                                 (mtrmng->drop_tbl[domain],
15695                                 struct mlx5_flow_tbl_data_entry, tbl);
15696                                 if (i < MLX5_MTR_RTE_COLORS) {
15697                                         act_cnt->dr_jump_action[domain] =
15698                                                 tbl_data->jump.action;
15699                                         act_cnt->fate_action =
15700                                                 MLX5_FLOW_FATE_DROP;
15701                                 }
15702                                 if (i == RTE_COLOR_RED)
15703                                         mtr_policy->dr_drop_action[domain] =
15704                                                 tbl_data->jump.action;
15705                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15706                                 break;
15707                         }
15708                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15709                         {
15710                                 if (i >= MLX5_MTR_RTE_COLORS)
15711                                         return -rte_mtr_error_set(error,
15712                                         ENOTSUP,
15713                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15714                                         NULL, "cannot create policy "
15715                                         "fate queue for this color");
15716                                 act_cnt->queue =
15717                                 ((const struct rte_flow_action_queue *)
15718                                         (act->conf))->index;
15719                                 act_cnt->fate_action =
15720                                         MLX5_FLOW_FATE_QUEUE;
15721                                 dev_flow.handle->fate_action =
15722                                         MLX5_FLOW_FATE_QUEUE;
15723                                 mtr_policy->is_queue = 1;
15724                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15725                                 break;
15726                         }
15727                         case RTE_FLOW_ACTION_TYPE_RSS:
15728                         {
15729                                 int rss_size;
15730
15731                                 if (i >= MLX5_MTR_RTE_COLORS)
15732                                         return -rte_mtr_error_set(error,
15733                                           ENOTSUP,
15734                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15735                                           NULL,
15736                                           "cannot create policy "
15737                                           "rss action for this color");
15738                                 /*
15739                                  * Save RSS conf into policy struct
15740                                  * for translate stage.
15741                                  */
15742                                 rss_size = (int)rte_flow_conv
15743                                         (RTE_FLOW_CONV_OP_ACTION,
15744                                         NULL, 0, act, &flow_err);
15745                                 if (rss_size <= 0)
15746                                         return -rte_mtr_error_set(error,
15747                                           ENOTSUP,
15748                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15749                                           NULL, "Get the wrong "
15750                                           "rss action struct size");
15751                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15752                                                 rss_size, 0, SOCKET_ID_ANY);
15753                                 if (!act_cnt->rss)
15754                                         return -rte_mtr_error_set(error,
15755                                           ENOTSUP,
15756                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15757                                           NULL,
15758                                           "Fail to malloc rss action memory");
15759                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15760                                         act_cnt->rss, rss_size,
15761                                         act, &flow_err);
15762                                 if (ret < 0)
15763                                         return -rte_mtr_error_set(error,
15764                                           ENOTSUP,
15765                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15766                                           NULL, "Fail to save "
15767                                           "rss action into policy struct");
15768                                 act_cnt->fate_action =
15769                                         MLX5_FLOW_FATE_SHARED_RSS;
15770                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15771                                 break;
15772                         }
15773                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15774                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
15775                         {
15776                                 struct mlx5_flow_dv_port_id_action_resource
15777                                         port_id_resource;
15778                                 uint32_t port_id = 0;
15779
15780                                 if (i >= MLX5_MTR_RTE_COLORS)
15781                                         return -rte_mtr_error_set(error,
15782                                         ENOTSUP,
15783                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15784                                         NULL, "cannot create policy "
15785                                         "port action for this color");
15786                                 memset(&port_id_resource, 0,
15787                                         sizeof(port_id_resource));
15788                                 if (flow_dv_translate_action_port_id(dev, act,
15789                                                 &port_id, &flow_err))
15790                                         return -rte_mtr_error_set(error,
15791                                         ENOTSUP,
15792                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15793                                         NULL, "cannot translate "
15794                                         "policy port action");
15795                                 port_id_resource.port_id = port_id;
15796                                 if (flow_dv_port_id_action_resource_register
15797                                         (dev, &port_id_resource,
15798                                         &dev_flow, &flow_err))
15799                                         return -rte_mtr_error_set(error,
15800                                         ENOTSUP,
15801                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15802                                         NULL, "cannot setup "
15803                                         "policy port action");
15804                                 act_cnt->rix_port_id_action =
15805                                         dev_flow.handle->rix_port_id_action;
15806                                 act_cnt->fate_action =
15807                                         MLX5_FLOW_FATE_PORT_ID;
15808                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15809                                 break;
15810                         }
15811                         case RTE_FLOW_ACTION_TYPE_JUMP:
15812                         {
15813                                 uint32_t jump_group = 0;
15814                                 uint32_t table = 0;
15815                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15816                                 struct flow_grp_info grp_info = {
15817                                         .external = !!dev_flow.external,
15818                                         .transfer = !!transfer,
15819                                         .fdb_def_rule = !!priv->fdb_def_rule,
15820                                         .std_tbl_fix = 0,
15821                                         .skip_scale = dev_flow.skip_scale &
15822                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15823                                 };
15824                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15825                                         mtr_policy->sub_policys[domain][0];
15826
15827                                 if (i >= MLX5_MTR_RTE_COLORS)
15828                                         return -rte_mtr_error_set(error,
15829                                           ENOTSUP,
15830                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15831                                           NULL,
15832                                           "cannot create policy "
15833                                           "jump action for this color");
15834                                 jump_group =
15835                                 ((const struct rte_flow_action_jump *)
15836                                                         act->conf)->group;
15837                                 if (mlx5_flow_group_to_table(dev, NULL,
15838                                                        jump_group,
15839                                                        &table,
15840                                                        &grp_info, &flow_err))
15841                                         return -rte_mtr_error_set(error,
15842                                         ENOTSUP,
15843                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15844                                         NULL, "cannot setup "
15845                                         "policy jump action");
15846                                 sub_policy->jump_tbl[i] =
15847                                 flow_dv_tbl_resource_get(dev,
15848                                         table, egress,
15849                                         transfer,
15850                                         !!dev_flow.external,
15851                                         NULL, jump_group, 0,
15852                                         0, &flow_err);
15853                                 if (!sub_policy->jump_tbl[i])
15854                                         return -rte_mtr_error_set(error,
15856                                         ENOTSUP,
15857                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15858                                         NULL, "cannot create jump action.");
15859                                 tbl_data = container_of
15860                                 (sub_policy->jump_tbl[i],
15861                                 struct mlx5_flow_tbl_data_entry, tbl);
15862                                 act_cnt->dr_jump_action[domain] =
15863                                         tbl_data->jump.action;
15864                                 act_cnt->fate_action =
15865                                         MLX5_FLOW_FATE_JUMP;
15866                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15867                                 break;
15868                         }
15869                         /*
15870                          * No need to check meter hierarchy for Y or R colors
15871                          * here since it is done in the validation stage.
15872                          */
15873                         case RTE_FLOW_ACTION_TYPE_METER:
15874                         {
15875                                 const struct rte_flow_action_meter *mtr;
15876                                 struct mlx5_flow_meter_info *next_fm;
15877                                 struct mlx5_flow_meter_policy *next_policy;
15878                                 struct rte_flow_action tag_action;
15879                                 struct mlx5_rte_flow_action_set_tag set_tag;
15880                                 uint32_t next_mtr_idx = 0;
15881
15882                                 mtr = act->conf;
15883                                 next_fm = mlx5_flow_meter_find(priv,
15884                                                         mtr->mtr_id,
15885                                                         &next_mtr_idx);
15886                                 if (!next_fm)
15887                                         return -rte_mtr_error_set(error, EINVAL,
15888                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15889                                                 "Fail to find next meter.");
15890                                 if (next_fm->def_policy)
15891                                         return -rte_mtr_error_set(error, EINVAL,
15892                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15893                                 "Hierarchy only supports termination meter.");
15894                                 next_policy = mlx5_flow_meter_policy_find(dev,
15895                                                 next_fm->policy_id, NULL);
15896                                 MLX5_ASSERT(next_policy);
15897                                 if (next_fm->drop_cnt) {
15898                                         set_tag.id =
15899                                                 (enum modify_reg)
15900                                                 mlx5_flow_get_reg_id(dev,
15901                                                 MLX5_MTR_ID,
15902                                                 0,
15903                                                 (struct rte_flow_error *)error);
15904                                         set_tag.offset = (priv->mtr_reg_share ?
15905                                                 MLX5_MTR_COLOR_BITS : 0);
15906                                         set_tag.length = (priv->mtr_reg_share ?
15907                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15908                                                MLX5_REG_BITS);
15909                                         set_tag.data = next_mtr_idx;
15910                                         tag_action.type =
15911                                                 (enum rte_flow_action_type)
15912                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15913                                         tag_action.conf = &set_tag;
15914                                         if (flow_dv_convert_action_set_reg
15915                                                 (mhdr_res, &tag_action,
15916                                                 (struct rte_flow_error *)error))
15917                                                 return -rte_errno;
15918                                         action_flags |=
15919                                                 MLX5_FLOW_ACTION_SET_TAG;
15920                                 }
15921                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15922                                 act_cnt->next_mtr_id = next_fm->meter_id;
15923                                 act_cnt->next_sub_policy = NULL;
15924                                 mtr_policy->is_hierarchy = 1;
15925                                 mtr_policy->dev = next_policy->dev;
15926                                 if (next_policy->mark)
15927                                         mtr_policy->mark = 1;
15928                                 action_flags |=
15929                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15930                                 break;
15931                         }
15932                         default:
15933                                 return -rte_mtr_error_set(error, ENOTSUP,
15934                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15935                                           NULL, "action type not supported");
15936                         }
15937                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15938                                 /* Create the modify header action if needed. */
15939                                 dev_flow.dv.group = 1;
15940                                 if (flow_dv_modify_hdr_resource_register
15941                                         (dev, mhdr_res, &dev_flow, &flow_err))
15942                                         return -rte_mtr_error_set(error,
15943                                                 ENOTSUP,
15944                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15945                                                 NULL, "cannot register policy "
15946                                                 "set tag action");
15947                                 act_cnt->modify_hdr =
15948                                         dev_flow.handle->dvh.modify_hdr;
15949                         }
15950                 }
15951         }
15952         return 0;
15953 }
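/*
 * The actions[RTE_COLORS] array consumed above originates from the
 * application through the rte_mtr API. An illustrative policy that queues
 * green packets and drops red ones (port, policy id and queue index are
 * assumptions for the example):
 *
 * @code
 * struct rte_flow_action_queue green_queue = { .index = 0 };
 * struct rte_flow_action green_acts[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &green_queue },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_action red_acts[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_mtr_meter_policy_params policy = {
 *         .actions = {
 *                 [RTE_COLOR_GREEN] = green_acts,
 *                 [RTE_COLOR_RED] = red_acts,
 *         },
 * };
 * struct rte_mtr_error mtr_error;
 *
 * rte_mtr_meter_policy_add(port_id, policy_id, &policy, &mtr_error);
 * @endcode
 */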
15954
15955 /**
15956  * Create policy actions for all domains, lock free,
15957  * (mutex should be acquired by caller).
15958  * Iterates the domains and creates the per-domain actions.
15959  *
15960  * @param[in] dev
15961  *   Pointer to the Ethernet device structure.
15962  * @param[in] mtr_policy
15963  *   Meter policy struct.
15964  * @param[in] actions
15965  *   Per-color action specifications used to create the meter actions.
15966  * @param[out] error
15967  *   Perform verbose error reporting if not NULL. Initialized in case of
15968  *   error only.
15969  *
15970  * @return
15971  *   0 on success, otherwise negative errno value.
15972  */
15973 static int
15974 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15975                       struct mlx5_flow_meter_policy *mtr_policy,
15976                       const struct rte_flow_action *actions[RTE_COLORS],
15977                       struct rte_mtr_error *error)
15978 {
15979         int ret, i;
15980         uint16_t sub_policy_num;
15981
15982         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15983                 sub_policy_num = (mtr_policy->sub_policy_num >>
15984                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15985                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15986                 if (sub_policy_num) {
15987                         ret = __flow_dv_create_domain_policy_acts(dev,
15988                                 mtr_policy, actions,
15989                                 (enum mlx5_meter_domain)i, error);
15990                         /* Cleaning resource is done in the caller level. */
15991                         if (ret)
15992                                 return ret;
15993                 }
15994         }
15995         return 0;
15996 }
15997
15998 /**
15999  * Query a DV flow rule for its statistics via DevX.
16000  *
16001  * @param[in] dev
16002  *   Pointer to Ethernet device.
16003  * @param[in] cnt_idx
16004  *   Index to the flow counter.
16005  * @param[out] data
16006  *   Data retrieved by the query.
16007  * @param[out] error
16008  *   Perform verbose error reporting if not NULL.
16009  *
16010  * @return
16011  *   0 on success, a negative errno value otherwise and rte_errno is set.
16012  */
16013 static int
16014 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
16015                     struct rte_flow_error *error)
16016 {
16017         struct mlx5_priv *priv = dev->data->dev_private;
16018         struct rte_flow_query_count *qc = data;
16019
16020         if (!priv->sh->cdev->config.devx)
16021                 return rte_flow_error_set(error, ENOTSUP,
16022                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16023                                           NULL,
16024                                           "counters are not supported");
16025         if (cnt_idx) {
16026                 uint64_t pkts, bytes;
16027                 struct mlx5_flow_counter *cnt;
16028                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
16029
16030                 if (err)
16031                         return rte_flow_error_set(error, -err,
16032                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16033                                         NULL, "cannot read counters");
16034                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
16035                 qc->hits_set = 1;
16036                 qc->bytes_set = 1;
16037                 qc->hits = pkts - cnt->hits;
16038                 qc->bytes = bytes - cnt->bytes;
16039                 if (qc->reset) {
16040                         cnt->hits = pkts;
16041                         cnt->bytes = bytes;
16042                 }
16043                 return 0;
16044         }
16045         return rte_flow_error_set(error, EINVAL,
16046                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16047                                   NULL,
16048                                   "counters are not available");
16049 }
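/*
 * Illustrative counter read-out: the returned hits/bytes are deltas against
 * the snapshot kept in the driver counter, and .reset re-arms that snapshot
 * (port_id, flow and error are assumed to exist):
 *
 * @code
 * struct rte_flow_query_count count = { .reset = 1 };
 * struct rte_flow_action count_action = {
 *         .type = RTE_FLOW_ACTION_TYPE_COUNT,
 * };
 *
 * if (!rte_flow_query(port_id, flow, &count_action, &count, &error) &&
 *     count.hits_set)
 *         printf("hits: %" PRIu64 " bytes: %" PRIu64 "\n",
 *                count.hits, count.bytes);
 * @endcode
 */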
16050
16051 int
16052 flow_dv_action_query(struct rte_eth_dev *dev,
16053                      const struct rte_flow_action_handle *handle, void *data,
16054                      struct rte_flow_error *error)
16055 {
16056         struct mlx5_age_param *age_param;
16057         struct rte_flow_query_age *resp;
16058         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
16059         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
16060         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
16061         struct mlx5_priv *priv = dev->data->dev_private;
16062         struct mlx5_aso_ct_action *ct;
16063         uint16_t owner;
16064         uint32_t dev_idx;
16065
16066         switch (type) {
16067         case MLX5_INDIRECT_ACTION_TYPE_AGE:
16068                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
16069                 resp = data;
16070                 resp->aged = __atomic_load_n(&age_param->state,
16071                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
16072                                                                           1 : 0;
16073                 resp->sec_since_last_hit_valid = !resp->aged;
16074                 if (resp->sec_since_last_hit_valid)
16075                         resp->sec_since_last_hit = __atomic_load_n
16076                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
16077                 return 0;
16078         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
16079                 return flow_dv_query_count(dev, idx, data, error);
16080         case MLX5_INDIRECT_ACTION_TYPE_CT:
16081                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
16082                 if (owner != PORT_ID(priv))
16083                         return rte_flow_error_set(error, EACCES,
16084                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16085                                         NULL,
16086                                         "CT object owned by another port");
16087                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
16088                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
16089                 MLX5_ASSERT(ct);
16090                 if (!ct->refcnt)
16091                         return rte_flow_error_set(error, EFAULT,
16092                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16093                                         NULL,
16094                                         "CT object is inactive");
16095                 ((struct rte_flow_action_conntrack *)data)->peer_port =
16096                                                         ct->peer;
16097                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
16098                                                         ct->is_original;
16099                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
16100                         return rte_flow_error_set(error, EIO,
16101                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16102                                         NULL,
16103                                         "Failed to query CT context");
16104                 return 0;
16105         default:
16106                 return rte_flow_error_set(error, ENOTSUP,
16107                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
16108                                           "action type query not supported");
16109         }
16110 }
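/*
 * Illustrative query of an indirect AGE action; the same entry point also
 * serves COUNT (struct rte_flow_query_count) and CT
 * (struct rte_flow_action_conntrack) handles. age_handle is assumed:
 *
 * @code
 * struct rte_flow_query_age age_resp;
 *
 * if (!rte_flow_action_handle_query(port_id, age_handle, &age_resp,
 *                                   &error) &&
 *     age_resp.sec_since_last_hit_valid)
 *         printf("idle for %u seconds\n",
 *                (unsigned int)age_resp.sec_since_last_hit);
 * @endcode
 */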
16111
16112 /**
16113  * Query a flow rule AGE action for aging information.
16114  *
16115  * @param[in] dev
16116  *   Pointer to Ethernet device.
16117  * @param[in] flow
16118  *   Pointer to the sub flow.
16119  * @param[out] data
16120  *   Data retrieved by the query.
16121  * @param[out] error
16122  *   Perform verbose error reporting if not NULL.
16123  *
16124  * @return
16125  *   0 on success, a negative errno value otherwise and rte_errno is set.
16126  */
16127 static int
16128 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
16129                   void *data, struct rte_flow_error *error)
16130 {
16131         struct rte_flow_query_age *resp = data;
16132         struct mlx5_age_param *age_param;
16133
16134         if (flow->age) {
16135                 struct mlx5_aso_age_action *act =
16136                                      flow_aso_age_get_by_idx(dev, flow->age);
16137
16138                 age_param = &act->age_params;
16139         } else if (flow->counter) {
16140                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
16141
16142                 if (!age_param || !age_param->timeout)
16143                         return rte_flow_error_set
16144                                         (error, EINVAL,
16145                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16146                                          NULL, "cannot read age data");
16147         } else {
16148                 return rte_flow_error_set(error, EINVAL,
16149                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16150                                           NULL, "age data not available");
16151         }
16152         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
16153                                      AGE_TMOUT ? 1 : 0;
16154         resp->sec_since_last_hit_valid = !resp->aged;
16155         if (resp->sec_since_last_hit_valid)
16156                 resp->sec_since_last_hit = __atomic_load_n
16157                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
16158         return 0;
16159 }
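
      /*
       * Usage sketch (illustrative only, not part of the driver): the query
       * above is reached through the generic rte_flow API. "port_id" and
       * "flow" are assumed application-side variables.
       *
       *     struct rte_flow_query_age age = { 0 };
       *     const struct rte_flow_action action[] = {
       *             { .type = RTE_FLOW_ACTION_TYPE_AGE },
       *             { .type = RTE_FLOW_ACTION_TYPE_END },
       *     };
       *     struct rte_flow_error err;
       *
       *     if (rte_flow_query(port_id, flow, action, &age, &err) == 0 &&
       *         age.sec_since_last_hit_valid)
       *             printf("idle for %u s\n",
       *                    (unsigned int)age.sec_since_last_hit);
       */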
16160
16161 /**
16162  * Query a flow.
16163  *
16164  * @see rte_flow_query()
16165  * @see rte_flow_ops
16166  */
16167 static int
16168 flow_dv_query(struct rte_eth_dev *dev,
16169               struct rte_flow *flow,
16170               const struct rte_flow_action *actions,
16171               void *data,
16172               struct rte_flow_error *error)
16173 {
16174         int ret = -EINVAL;
16175
16176         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
16177                 switch (actions->type) {
16178                 case RTE_FLOW_ACTION_TYPE_VOID:
16179                         break;
16180                 case RTE_FLOW_ACTION_TYPE_COUNT:
16181                         ret = flow_dv_query_count(dev, flow->counter, data,
16182                                                   error);
16183                         break;
16184                 case RTE_FLOW_ACTION_TYPE_AGE:
16185                         ret = flow_dv_query_age(dev, flow, data, error);
16186                         break;
16187                 default:
16188                         return rte_flow_error_set(error, ENOTSUP,
16189                                                   RTE_FLOW_ERROR_TYPE_ACTION,
16190                                                   actions,
16191                                                   "action not supported");
16192                 }
16193         }
16194         return ret;
16195 }
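
      /*
       * Usage sketch (illustrative only): a COUNT query takes the same entry
       * point; "port_id" and "flow" are assumed application-side variables.
       *
       *     struct rte_flow_query_count cnt = { .reset = 0 };
       *     const struct rte_flow_action action[] = {
       *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
       *             { .type = RTE_FLOW_ACTION_TYPE_END },
       *     };
       *     struct rte_flow_error err;
       *
       *     if (rte_flow_query(port_id, flow, action, &cnt, &err) == 0 &&
       *         cnt.hits_set && cnt.bytes_set)
       *             printf("%" PRIu64 " packets / %" PRIu64 " bytes\n",
       *                    cnt.hits, cnt.bytes);
       */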
16196
16197 /**
16198  * Destroy the meter table set.
16199  * Lock free; the mutex should be acquired by the caller.
16200  *
16201  * @param[in] dev
16202  *   Pointer to Ethernet device.
16203  * @param[in] fm
16204  *   Meter information table.
16205  */
16206 static void
16207 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
16208                         struct mlx5_flow_meter_info *fm)
16209 {
16210         struct mlx5_priv *priv = dev->data->dev_private;
16211         int i;
16212
16213         if (!fm || !priv->sh->config.dv_flow_en)
16214                 return;
16215         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16216                 if (fm->drop_rule[i]) {
16217                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
16218                         fm->drop_rule[i] = NULL;
16219                 }
16220         }
16221 }
16222
16223 static void
16224 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
16225 {
16226         struct mlx5_priv *priv = dev->data->dev_private;
16227         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16228         struct mlx5_flow_tbl_data_entry *tbl;
16229         int i, j;
16230
16231         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16232                 if (mtrmng->def_rule[i]) {
16233                         claim_zero(mlx5_flow_os_destroy_flow
16234                                         (mtrmng->def_rule[i]));
16235                         mtrmng->def_rule[i] = NULL;
16236                 }
16237                 if (mtrmng->def_matcher[i]) {
16238                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
16239                                 struct mlx5_flow_tbl_data_entry, tbl);
16240                         mlx5_list_unregister(tbl->matchers,
16241                                              &mtrmng->def_matcher[i]->entry);
16242                         mtrmng->def_matcher[i] = NULL;
16243                 }
16244                 for (j = 0; j < MLX5_REG_BITS; j++) {
16245                         if (mtrmng->drop_matcher[i][j]) {
16246                                 tbl =
16247                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
16248                                              struct mlx5_flow_tbl_data_entry,
16249                                              tbl);
16250                                 mlx5_list_unregister(tbl->matchers,
16251                                             &mtrmng->drop_matcher[i][j]->entry);
16252                                 mtrmng->drop_matcher[i][j] = NULL;
16253                         }
16254                 }
16255                 if (mtrmng->drop_tbl[i]) {
16256                         flow_dv_tbl_resource_release(MLX5_SH(dev),
16257                                 mtrmng->drop_tbl[i]);
16258                         mtrmng->drop_tbl[i] = NULL;
16259                 }
16260         }
16261 }
16262
16263 /* Number of meter flow actions, count and jump or count and drop. */
16264 #define METER_ACTIONS 2
16265
16266 static void
16267 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
16268                                     enum mlx5_meter_domain domain)
16269 {
16270         struct mlx5_priv *priv = dev->data->dev_private;
16271         struct mlx5_flow_meter_def_policy *def_policy =
16272                         priv->sh->mtrmng->def_policy[domain];
16273
16274         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
16275         mlx5_free(def_policy);
16276         priv->sh->mtrmng->def_policy[domain] = NULL;
16277 }
16278
16279 /**
16280  * Destroy the default policy table set.
16281  *
16282  * @param[in] dev
16283  *   Pointer to Ethernet device.
16284  */
16285 static void
16286 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
16287 {
16288         struct mlx5_priv *priv = dev->data->dev_private;
16289         int i;
16290
16291         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
16292                 if (priv->sh->mtrmng->def_policy[i])
16293                         __flow_dv_destroy_domain_def_policy(dev,
16294                                         (enum mlx5_meter_domain)i);
16295         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
16296 }
16297
16298 static int
16299 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
16300                         uint32_t color_reg_c_idx,
16301                         enum rte_color color, void *matcher_object,
16302                         int actions_n, void *actions,
16303                         bool match_src_port, const struct rte_flow_item *item,
16304                         void **rule, const struct rte_flow_attr *attr)
16305 {
16306         int ret;
16307         struct mlx5_flow_dv_match_params value = {
16308                 .size = sizeof(value.buf),
16309         };
16310         struct mlx5_flow_dv_match_params matcher = {
16311                 .size = sizeof(matcher.buf),
16312         };
16313         struct mlx5_priv *priv = dev->data->dev_private;
16314         uint8_t misc_mask;
16315
16316         if (match_src_port && priv->sh->esw_mode) {
16317                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
16318                                                    value.buf, item, attr)) {
16319                         DRV_LOG(ERR, "Failed to create meter policy%d flow's"
16320                                 " value with port.", color);
16321                         return -1;
16322                 }
16323         }
16324         flow_dv_match_meta_reg(matcher.buf, value.buf,
16325                                (enum modify_reg)color_reg_c_idx,
16326                                rte_col_2_mlx5_col(color), UINT32_MAX);
16327         misc_mask = flow_dv_matcher_enable(value.buf);
16328         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16329         ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
16330                                        actions_n, actions, rule);
16331         if (ret) {
16332                 DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
16333                 return -1;
16334         }
16335         return 0;
16336 }
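
      /*
       * The rule installed above matches packets whose color metadata
       * register (color_reg_c_idx) equals the device encoding of "color"
       * under a full 32-bit mask (see rte_col_2_mlx5_col()), optionally
       * narrowed by the source-port match on E-Switch, and attaches the
       * per-color action list supplied by the caller.
       */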
16337
16338 static int
16339 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
16340                         uint32_t color_reg_c_idx,
16341                         uint16_t priority,
16342                         struct mlx5_flow_meter_sub_policy *sub_policy,
16343                         const struct rte_flow_attr *attr,
16344                         bool match_src_port,
16345                         const struct rte_flow_item *item,
16346                         struct mlx5_flow_dv_matcher **policy_matcher,
16347                         struct rte_flow_error *error)
16348 {
16349         struct mlx5_list_entry *entry;
16350         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
16351         struct mlx5_flow_dv_matcher matcher = {
16352                 .mask = {
16353                         .size = sizeof(matcher.mask.buf),
16354                 },
16355                 .tbl = tbl_rsc,
16356         };
16357         struct mlx5_flow_dv_match_params value = {
16358                 .size = sizeof(value.buf),
16359         };
16360         struct mlx5_flow_cb_ctx ctx = {
16361                 .error = error,
16362                 .data = &matcher,
16363         };
16364         struct mlx5_flow_tbl_data_entry *tbl_data;
16365         struct mlx5_priv *priv = dev->data->dev_private;
16366         const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
16367
16368         if (match_src_port && priv->sh->esw_mode) {
16369                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
16370                                                    value.buf, item, attr)) {
16371                         DRV_LOG(ERR, "Failed to register meter policy%d matcher"
16372                                 " with port.", priority);
16373                         return -1;
16374                 }
16375         }
16376         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
16377         if (priority < RTE_COLOR_RED)
16378                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16379                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
16380         matcher.priority = priority;
16381         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
16382                                     matcher.mask.size);
16383         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16384         if (!entry) {
16385                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
16386                 return -1;
16387         }
16388         *policy_matcher =
16389                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
16390         return 0;
16391 }
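
      /*
       * Matchers are reference-counted through the owning table's matcher
       * list: mlx5_list_register() above either returns an existing entry
       * with its reference bumped or creates a new one, which is why the
       * teardown paths release matchers with mlx5_list_unregister() instead
       * of destroying them directly.
       */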
16392
16393 /**
16394  * Create the policy rules per domain.
16395  *
16396  * @param[in] dev
16397  *   Pointer to Ethernet device.
16398  * @param[in] sub_policy
16399  *   Pointer to sub policy table.
16400  * @param[in] egress
16401  *   Direction of the table.
16402  * @param[in] transfer
16403  *   E-Switch or NIC flow.
16404  * @param[in] acts
16405  *   Pointer to policy action list per color.
16406  *
16407  * @return
16408  *   0 on success, -1 otherwise.
16409  */
16410 static int
16411 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16412                 struct mlx5_flow_meter_sub_policy *sub_policy,
16413                 uint8_t egress, uint8_t transfer, bool match_src_port,
16414                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
16415 {
16416         struct mlx5_priv *priv = dev->data->dev_private;
16417         struct rte_flow_error flow_err;
16418         uint32_t color_reg_c_idx;
16419         struct rte_flow_attr attr = {
16420                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16421                 .priority = 0,
16422                 .ingress = 0,
16423                 .egress = !!egress,
16424                 .transfer = !!transfer,
16425                 .reserved = 0,
16426         };
16427         int i;
16428         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16429         struct mlx5_sub_policy_color_rule *color_rule;
16430         bool svport_match;
16431         struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
16432
16433         if (ret < 0)
16434                 return -1;
16435         /* Create policy table with POLICY level. */
16436         if (!sub_policy->tbl_rsc)
16437                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16438                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
16439                                 egress, transfer, false, NULL, 0, 0,
16440                                 sub_policy->idx, &flow_err);
16441         if (!sub_policy->tbl_rsc) {
16442                 DRV_LOG(ERR,
16443                         "Failed to create meter sub policy table.");
16444                 return -1;
16445         }
16446         /* Prepare matchers. */
16447         color_reg_c_idx = ret;
16448         for (i = 0; i < RTE_COLORS; i++) {
16449                 TAILQ_INIT(&sub_policy->color_rules[i]);
16450                 if (!acts[i].actions_n)
16451                         continue;
16452                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16453                                 sizeof(struct mlx5_sub_policy_color_rule),
16454                                 0, SOCKET_ID_ANY);
16455                 if (!color_rule) {
16456                         DRV_LOG(ERR, "No memory to create color rule.");
16457                         goto err_exit;
16458                 }
16459                 tmp_rules[i] = color_rule;
16460                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16461                                   color_rule, next_port);
16462                 color_rule->src_port = priv->representor_id;
16463                 /* attr.priority is set but not used by the matcher below. */
16464                 attr.priority = i;
16465                 /* Create matchers for colors. */
16466                 svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
16467                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16468                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16469                                 &attr, svport_match, NULL,
16470                                 &color_rule->matcher, &flow_err)) {
16471                         DRV_LOG(ERR, "Failed to create color%u matcher.", i);
16472                         goto err_exit;
16473                 }
16474                 /* Create flow, matching color. */
16475                 if (__flow_dv_create_policy_flow(dev,
16476                                 color_reg_c_idx, (enum rte_color)i,
16477                                 color_rule->matcher->matcher_object,
16478                                 acts[i].actions_n, acts[i].dv_actions,
16479                                 svport_match, NULL, &color_rule->rule,
16480                                 &attr)) {
16481                         DRV_LOG(ERR, "Failed to create color%u rule.", i);
16482                         goto err_exit;
16483                 }
16484         }
16485         return 0;
16486 err_exit:
16487         /* All the policy rules will be cleared. */
16488         do {
16489                 color_rule = tmp_rules[i];
16490                 if (color_rule) {
16491                         if (color_rule->rule)
16492                                 mlx5_flow_os_destroy_flow(color_rule->rule);
16493                         if (color_rule->matcher) {
16494                                 struct mlx5_flow_tbl_data_entry *tbl =
16495                                         container_of(color_rule->matcher->tbl,
16496                                                      typeof(*tbl), tbl);
16497                                 mlx5_list_unregister(tbl->matchers,
16498                                                 &color_rule->matcher->entry);
16499                         }
16500                         TAILQ_REMOVE(&sub_policy->color_rules[i],
16501                                      color_rule, next_port);
16502                         mlx5_free(color_rule);
16503                 }
16504         } while (i--);
16505         return -1;
16506 }
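
      /*
       * Note on the unwind above: "i" still holds the color index that
       * failed, whose rule and matcher may be partially constructed, so the
       * do/while walks from that index back down to 0 and releases whatever
       * each tmp_rules[] slot accumulated.
       */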
16507
16508 static int
16509 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16510                         struct mlx5_flow_meter_policy *mtr_policy,
16511                         struct mlx5_flow_meter_sub_policy *sub_policy,
16512                         uint32_t domain)
16513 {
16514         struct mlx5_priv *priv = dev->data->dev_private;
16515         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16516         struct mlx5_flow_dv_tag_resource *tag;
16517         struct mlx5_flow_dv_port_id_action_resource *port_action;
16518         struct mlx5_hrxq *hrxq;
16519         struct mlx5_flow_meter_info *next_fm = NULL;
16520         struct mlx5_flow_meter_policy *next_policy;
16521         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16522         struct mlx5_flow_tbl_data_entry *tbl_data;
16523         struct rte_flow_error error;
16524         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16525         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16526         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16527         bool match_src_port = false;
16528         int i;
16529
16530         /* If RSS or Queue, no previous actions / rules is created. */
16531         for (i = 0; i < RTE_COLORS; i++) {
16532                 acts[i].actions_n = 0;
16533                 if (i == RTE_COLOR_RED) {
16534                         /* Only support drop on red. */
16535                         acts[i].dv_actions[0] =
16536                                 mtr_policy->dr_drop_action[domain];
16537                         acts[i].actions_n = 1;
16538                         continue;
16539                 }
16540                 if (i == RTE_COLOR_GREEN &&
16541                     mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16542                         struct rte_flow_attr attr = {
16543                                 .transfer = transfer
16544                         };
16545
16546                         next_fm = mlx5_flow_meter_find(priv,
16547                                         mtr_policy->act_cnt[i].next_mtr_id,
16548                                         NULL);
16549                         if (!next_fm) {
16550                                 DRV_LOG(ERR,
16551                                         "Failed to get next hierarchy meter.");
16552                                 goto err_exit;
16553                         }
16554                         if (mlx5_flow_meter_attach(priv, next_fm,
16555                                                    &attr, &error)) {
16556                                 DRV_LOG(ERR, "%s", error.message);
16557                                 next_fm = NULL;
16558                                 goto err_exit;
16559                         }
16560                         /* Meter action must be the first for TX. */
16561                         if (mtr_first) {
16562                                 acts[i].dv_actions[acts[i].actions_n] =
16563                                         next_fm->meter_action;
16564                                 acts[i].actions_n++;
16565                         }
16566                 }
16567                 if (mtr_policy->act_cnt[i].rix_mark) {
16568                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16569                                         mtr_policy->act_cnt[i].rix_mark);
16570                         if (!tag) {
16571                                 DRV_LOG(ERR, "Failed to find "
16572                                 "mark action for policy.");
16573                                 goto err_exit;
16574                         }
16575                         acts[i].dv_actions[acts[i].actions_n] = tag->action;
16576                         acts[i].actions_n++;
16577                 }
16578                 if (mtr_policy->act_cnt[i].modify_hdr) {
16579                         acts[i].dv_actions[acts[i].actions_n] =
16580                                 mtr_policy->act_cnt[i].modify_hdr->action;
16581                         acts[i].actions_n++;
16582                 }
16583                 if (mtr_policy->act_cnt[i].fate_action) {
16584                         switch (mtr_policy->act_cnt[i].fate_action) {
16585                         case MLX5_FLOW_FATE_PORT_ID:
16586                                 port_action = mlx5_ipool_get
16587                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16588                                 mtr_policy->act_cnt[i].rix_port_id_action);
16589                                 if (!port_action) {
16590                                         DRV_LOG(ERR, "Failed to find "
16591                                                 "port action for policy.");
16592                                         goto err_exit;
16593                                 }
16594                                 acts[i].dv_actions[acts[i].actions_n] =
16595                                         port_action->action;
16596                                 acts[i].actions_n++;
16597                                 mtr_policy->dev = dev;
16598                                 match_src_port = true;
16599                                 break;
16600                         case MLX5_FLOW_FATE_DROP:
16601                         case MLX5_FLOW_FATE_JUMP:
16602                                 acts[i].dv_actions[acts[i].actions_n] =
16603                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
16604                                 acts[i].actions_n++;
16605                                 break;
16606                         case MLX5_FLOW_FATE_SHARED_RSS:
16607                         case MLX5_FLOW_FATE_QUEUE:
16608                                 hrxq = mlx5_ipool_get
16609                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16610                                          sub_policy->rix_hrxq[i]);
16611                                 if (!hrxq) {
16612                                         DRV_LOG(ERR, "Failed to find "
16613                                                 "queue action for policy.");
16614                                         goto err_exit;
16615                                 }
16616                                 acts[i].dv_actions[acts[i].actions_n] =
16617                                         hrxq->action;
16618                                 acts[i].actions_n++;
16619                                 break;
16620                         case MLX5_FLOW_FATE_MTR:
16621                                 if (!next_fm) {
16622                                         DRV_LOG(ERR,
16623                                                 "No next hierarchy meter.");
16624                                         goto err_exit;
16625                                 }
16626                                 if (!mtr_first) {
16627                                         acts[i].dv_actions[acts[i].actions_n] =
16628                                                         next_fm->meter_action;
16629                                         acts[i].actions_n++;
16630                                 }
16631                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
16632                                         next_sub_policy =
16633                                         mtr_policy->act_cnt[i].next_sub_policy;
16634                                 } else {
16635                                         next_policy =
16636                                                 mlx5_flow_meter_policy_find(dev,
16637                                                 next_fm->policy_id, NULL);
16638                                         MLX5_ASSERT(next_policy);
16639                                         next_sub_policy =
16640                                         next_policy->sub_policys[domain][0];
16641                                 }
16642                                 tbl_data =
16643                                         container_of(next_sub_policy->tbl_rsc,
16644                                         struct mlx5_flow_tbl_data_entry, tbl);
16645                                 acts[i].dv_actions[acts[i].actions_n++] =
16646                                                         tbl_data->jump.action;
16647                                 if (mtr_policy->act_cnt[i].modify_hdr)
16648                                         match_src_port = !!transfer;
16649                                 break;
16650                         default:
16651                                 /* Queue action does nothing. */
16652                                 break;
16653                         }
16654                 }
16655         }
16656         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16657                                 egress, transfer, match_src_port, acts)) {
16658                 DRV_LOG(ERR,
16659                         "Failed to create policy rules per domain.");
16660                 goto err_exit;
16661         }
16662         return 0;
16663 err_exit:
16664         if (next_fm)
16665                 mlx5_flow_meter_detach(priv, next_fm);
16666         return -1;
16667 }
16668
16669 /**
16670  * Create the policy rules.
16671  *
16672  * @param[in] dev
16673  *   Pointer to Ethernet device.
16674  * @param[in,out] mtr_policy
16675  *   Pointer to meter policy table.
16676  *
16677  * @return
16678  *   0 on success, -1 otherwise.
16679  */
16680 static int
16681 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16682                              struct mlx5_flow_meter_policy *mtr_policy)
16683 {
16684         int i;
16685         uint16_t sub_policy_num;
16686
16687         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16688                 sub_policy_num = (mtr_policy->sub_policy_num >>
16689                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16690                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16691                 if (!sub_policy_num)
16692                         continue;
16693                 /* Prepare actions list and create policy rules. */
16694                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16695                         mtr_policy->sub_policys[i][0], i)) {
16696                         DRV_LOG(ERR, "Failed to create policy action "
16697                                 "list per domain.");
16698                         return -1;
16699                 }
16700         }
16701         return 0;
16702 }
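
      /*
       * A minimal sketch of the sub_policy_num encoding relied on above and
       * throughout this file: the per-domain sub-policy counts are packed
       * into one uint32_t, MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per domain.
       * The helpers below are illustrative only and do not exist in the
       * driver; they merely name the open-coded expressions.
       *
       *     static inline uint16_t
       *     sub_policy_num_get(uint32_t packed, int domain)
       *     {
       *             return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
       *                    MLX5_MTR_SUB_POLICY_NUM_MASK;
       *     }
       *
       *     static inline uint32_t
       *     sub_policy_num_set(uint32_t packed, int domain, uint16_t num)
       *     {
       *             packed &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
       *                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
       *             return packed |
       *                    ((uint32_t)(num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
       *                     (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
       *     }
       */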
16703
16704 static int
16705 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16706 {
16707         struct mlx5_priv *priv = dev->data->dev_private;
16708         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16709         struct mlx5_flow_meter_def_policy *def_policy;
16710         struct mlx5_flow_tbl_resource *jump_tbl;
16711         struct mlx5_flow_tbl_data_entry *tbl_data;
16712         uint8_t egress, transfer;
16713         struct rte_flow_error error;
16714         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16715         int ret;
16716
16717         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16718         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16719         def_policy = mtrmng->def_policy[domain];
16720         if (!def_policy) {
16721                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16722                         sizeof(struct mlx5_flow_meter_def_policy),
16723                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16724                 if (!def_policy) {
16725                         DRV_LOG(ERR, "Failed to alloc default policy table.");
16726                         goto def_policy_error;
16727                 }
16728                 mtrmng->def_policy[domain] = def_policy;
16729                 /* Create the meter suffix table with SUFFIX level. */
16730                 jump_tbl = flow_dv_tbl_resource_get(dev,
16731                                 MLX5_FLOW_TABLE_LEVEL_METER,
16732                                 egress, transfer, false, NULL, 0,
16733                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16734                 if (!jump_tbl) {
16735                         DRV_LOG(ERR,
16736                                 "Failed to create meter suffix table.");
16737                         goto def_policy_error;
16738                 }
16739                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16740                 tbl_data = container_of(jump_tbl,
16741                                         struct mlx5_flow_tbl_data_entry, tbl);
16742                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16743                                                 tbl_data->jump.action;
16744                 acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
16745                 acts[RTE_COLOR_GREEN].actions_n = 1;
16746                 /*
16747                  * YELLOW has the same default policy as GREEN does.
16748                  * G & Y share the same table and action. The 2nd time of table
16749                  * resource getting is just to update the reference count for
16750                  * the releasing stage.
16751                  */
16752                 jump_tbl = flow_dv_tbl_resource_get(dev,
16753                                 MLX5_FLOW_TABLE_LEVEL_METER,
16754                                 egress, transfer, false, NULL, 0,
16755                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16756                 if (!jump_tbl) {
16757                         DRV_LOG(ERR,
16758                                 "Failed to get meter suffix table.");
16759                         goto def_policy_error;
16760                 }
16761                 def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
16762                 tbl_data = container_of(jump_tbl,
16763                                         struct mlx5_flow_tbl_data_entry, tbl);
16764                 def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
16765                                                 tbl_data->jump.action;
16766                 acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
16767                 acts[RTE_COLOR_YELLOW].actions_n = 1;
16768                 /* Create jump action to the drop table. */
16769                 if (!mtrmng->drop_tbl[domain]) {
16770                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16771                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16772                                  egress, transfer, false, NULL, 0,
16773                                  0, MLX5_MTR_TABLE_ID_DROP, &error);
16774                         if (!mtrmng->drop_tbl[domain]) {
16775                                 DRV_LOG(ERR, "Failed to create meter "
16776                                         "drop table for default policy.");
16777                                 goto def_policy_error;
16778                         }
16779                 }
16780                 /* All RED traffic jumps to the unique drop table. */
16781                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16782                                         struct mlx5_flow_tbl_data_entry, tbl);
16783                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16784                                                 tbl_data->jump.action;
16785                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16786                 acts[RTE_COLOR_RED].actions_n = 1;
16787                 /* Create default policy rules. */
16788                 ret = __flow_dv_create_domain_policy_rules(dev,
16789                                         &def_policy->sub_policy,
16790                                         egress, transfer, false, acts);
16791                 if (ret) {
16792                         DRV_LOG(ERR, "Failed to create default policy rules.");
16793                         goto def_policy_error;
16794                 }
16795         }
16796         return 0;
16797 def_policy_error:
16798         __flow_dv_destroy_domain_def_policy(dev,
16799                                             (enum mlx5_meter_domain)domain);
16800         return -1;
16801 }
16802
16803 /**
16804  * Create the default policy table set.
16805  *
16806  * @param[in] dev
16807  *   Pointer to Ethernet device.
16808  * @return
16809  *   0 on success, -1 otherwise.
16810  */
16811 static int
16812 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16813 {
16814         struct mlx5_priv *priv = dev->data->dev_private;
16815         int i;
16816
16817         /* Non-termination policy table. */
16818         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16819                 if (!priv->sh->config.dv_esw_en &&
16820                     i == MLX5_MTR_DOMAIN_TRANSFER)
16821                         continue;
16822                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16823                         DRV_LOG(ERR, "Failed to create default policy");
16824                         /* Rollback the created default policies for others. */
16825                         flow_dv_destroy_def_policy(dev);
16826                         return -1;
16827                 }
16828         }
16829         return 0;
16830 }
16831
16832 /**
16833  * Create the needed meter tables.
16834  * Lock free; the mutex should be acquired by the caller.
16835  *
16836  * @param[in] dev
16837  *   Pointer to Ethernet device.
16838  * @param[in] fm
16839  *   Meter information table.
16840  * @param[in] mtr_idx
16841  *   Meter index.
16842  * @param[in] domain_bitmap
16843  *   Domain bitmap.
16844  * @return
16845  *   0 on success, -1 otherwise.
16846  */
16847 static int
16848 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16849                         struct mlx5_flow_meter_info *fm,
16850                         uint32_t mtr_idx,
16851                         uint8_t domain_bitmap)
16852 {
16853         struct mlx5_priv *priv = dev->data->dev_private;
16854         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16855         struct rte_flow_error error;
16856         struct mlx5_flow_tbl_data_entry *tbl_data;
16857         uint8_t egress, transfer;
16858         void *actions[METER_ACTIONS];
16859         int domain, ret, i;
16860         struct mlx5_flow_counter *cnt;
16861         struct mlx5_flow_dv_match_params value = {
16862                 .size = sizeof(value.buf),
16863         };
16864         struct mlx5_flow_dv_match_params matcher_para = {
16865                 .size = sizeof(matcher_para.buf),
16866         };
16867         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16868                                                      0, &error);
16869         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16870         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16871         struct mlx5_list_entry *entry;
16872         struct mlx5_flow_dv_matcher matcher = {
16873                 .mask = {
16874                         .size = sizeof(matcher.mask.buf),
16875                 },
16876         };
16877         struct mlx5_flow_dv_matcher *drop_matcher;
16878         struct mlx5_flow_cb_ctx ctx = {
16879                 .error = &error,
16880                 .data = &matcher,
16881         };
16882         uint8_t misc_mask;
16883
16884         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16885                 rte_errno = ENOTSUP;
16886                 return -1;
16887         }
16888         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16889                 if (!(domain_bitmap & (1 << domain)) ||
16890                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16891                         continue;
16892                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16893                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16894                 /* Create the drop table with METER DROP level. */
16895                 if (!mtrmng->drop_tbl[domain]) {
16896                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16897                                         MLX5_FLOW_TABLE_LEVEL_METER,
16898                                         egress, transfer, false, NULL, 0,
16899                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16900                         if (!mtrmng->drop_tbl[domain]) {
16901                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16902                                 goto policy_error;
16903                         }
16904                 }
16905                 /* Create default matcher in drop table. */
16906                 matcher.tbl = mtrmng->drop_tbl[domain],
16907                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16908                                 struct mlx5_flow_tbl_data_entry, tbl);
16909                 if (!mtrmng->def_matcher[domain]) {
16910                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16911                                        (enum modify_reg)mtr_id_reg_c,
16912                                        0, 0);
16913                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16914                         matcher.crc = rte_raw_cksum
16915                                         ((const void *)matcher.mask.buf,
16916                                         matcher.mask.size);
16917                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16918                         if (!entry) {
16919                                 DRV_LOG(ERR, "Failed to register meter "
16920                                 "drop default matcher.");
16921                                 goto policy_error;
16922                         }
16923                         mtrmng->def_matcher[domain] = container_of(entry,
16924                         struct mlx5_flow_dv_matcher, entry);
16925                 }
16926                 /* Create default rule in drop table. */
16927                 if (!mtrmng->def_rule[domain]) {
16928                         i = 0;
16929                         actions[i++] = priv->sh->dr_drop_action;
16930                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16931                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16932                         misc_mask = flow_dv_matcher_enable(value.buf);
16933                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16934                         ret = mlx5_flow_os_create_flow
16935                                 (mtrmng->def_matcher[domain]->matcher_object,
16936                                 (void *)&value, i, actions,
16937                                 &mtrmng->def_rule[domain]);
16938                         if (ret) {
16939                                 DRV_LOG(ERR, "Failed to create meter "
16940                                 "default drop rule for drop table.");
16941                                 goto policy_error;
16942                         }
16943                 }
16944                 if (!fm->drop_cnt)
16945                         continue;
16946                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16947                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16948                         /* Create matchers for Drop. */
16949                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16950                                         (enum modify_reg)mtr_id_reg_c, 0,
16951                                         (mtr_id_mask << mtr_id_offset));
16952                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16953                         matcher.crc = rte_raw_cksum
16954                                         ((const void *)matcher.mask.buf,
16955                                         matcher.mask.size);
16956                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16957                         if (!entry) {
16958                                 DRV_LOG(ERR,
16959                                 "Failed to register meter drop matcher.");
16960                                 goto policy_error;
16961                         }
16962                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16963                                 container_of(entry, struct mlx5_flow_dv_matcher,
16964                                              entry);
16965                 }
16966                 drop_matcher =
16967                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16968                 /* Create drop rule, matching meter_id only. */
16969                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16970                                 (enum modify_reg)mtr_id_reg_c,
16971                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16972                 i = 0;
16973                 cnt = flow_dv_counter_get_by_idx(dev,
16974                                         fm->drop_cnt, NULL);
16975                 actions[i++] = cnt->action;
16976                 actions[i++] = priv->sh->dr_drop_action;
16977                 misc_mask = flow_dv_matcher_enable(value.buf);
16978                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16979                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16980                                                (void *)&value, i, actions,
16981                                                &fm->drop_rule[domain]);
16982                 if (ret) {
16983                         DRV_LOG(ERR, "Failed to create meter "
16984                                 "drop rule for drop table.");
16985                         goto policy_error;
16986                 }
16987         }
16988         return 0;
16989 policy_error:
16990         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16991                 if (fm->drop_rule[i]) {
16992                         claim_zero(mlx5_flow_os_destroy_flow
16993                                 (fm->drop_rule[i]));
16994                         fm->drop_rule[i] = NULL;
16995                 }
16996         }
16997         return -1;
16998 }
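
      /*
       * Note on the drop-rule match above (a sketch, assuming the register
       * layout implied by mtr_reg_share): when the color bits and the meter
       * index share one REG_C register, the meter index is matched at an
       * offset of MLX5_MTR_COLOR_BITS above the color field:
       *
       *     mask  programmed in the matcher: mtr_id_mask << mtr_id_offset
       *     value programmed in the rule:    mtr_idx     << mtr_id_offset
       *
       * with mtr_id_mask = (1u << mtrmng->max_mtr_bits) - 1 and
       * mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0.
       */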
16999
17000 static struct mlx5_flow_meter_sub_policy *
17001 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
17002                 struct mlx5_flow_meter_policy *mtr_policy,
17003                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
17004                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
17005                 bool *is_reuse)
17006 {
17007         struct mlx5_priv *priv = dev->data->dev_private;
17008         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17009         uint32_t sub_policy_idx = 0;
17010         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
17011         uint32_t i, j;
17012         struct mlx5_hrxq *hrxq;
17013         struct mlx5_flow_handle dh;
17014         struct mlx5_meter_policy_action_container *act_cnt;
17015         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17016         uint16_t sub_policy_num;
17017         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
17018
17019         MLX5_ASSERT(wks);
17020         rte_spinlock_lock(&mtr_policy->sl);
17021         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17022                 if (!rss_desc[i])
17023                         continue;
17024                 hrxq = mlx5_hrxq_get(dev, rss_desc[i]);
17025                 if (!hrxq) {
17026                         rte_spinlock_unlock(&mtr_policy->sl);
17027                         return NULL;
17028                 }
17029                 hrxq_idx[i] = hrxq->idx;
17030         }
17031         sub_policy_num = (mtr_policy->sub_policy_num >>
17032                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17033                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17034         for (j = 0; j < sub_policy_num; j++) {
17035                 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17036                         if (rss_desc[i] &&
17037                             hrxq_idx[i] !=
17038                             mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
17039                                 break;
17040                 }
17041                 if (i >= MLX5_MTR_RTE_COLORS) {
17042                         /*
17043                          * Found the sub policy table with
17044                          * the same queue per color.
17045                          */
17046                         rte_spinlock_unlock(&mtr_policy->sl);
17047                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
17048                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
17049                         *is_reuse = true;
17050                         return mtr_policy->sub_policys[domain][j];
17051                 }
17052         }
17053         /* Create sub policy. */
17054         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
17055                 /* Reuse the first pre-allocated sub_policy. */
17056                 sub_policy = mtr_policy->sub_policys[domain][0];
17057                 sub_policy_idx = sub_policy->idx;
17058         } else {
17059                 sub_policy = mlx5_ipool_zmalloc
17060                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17061                                  &sub_policy_idx);
17062                 if (!sub_policy ||
17063                     sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
17064                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
17065                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
17066                         goto rss_sub_policy_error;
17067                 }
17068                 sub_policy->idx = sub_policy_idx;
17069                 sub_policy->main_policy = mtr_policy;
17070         }
17071         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17072                 if (!rss_desc[i])
17073                         continue;
17074                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
17075                 if (mtr_policy->is_hierarchy) {
17076                         act_cnt = &mtr_policy->act_cnt[i];
17077                         act_cnt->next_sub_policy = next_sub_policy;
17078                         mlx5_hrxq_release(dev, hrxq_idx[i]);
17079                 } else {
17080                         /*
17081                          * Overwrite the last action from
17082                          * RSS action to Queue action.
17083                          */
17084                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
17085                                               hrxq_idx[i]);
17086                         if (!hrxq) {
17087                                 DRV_LOG(ERR, "Failed to get policy hrxq");
17088                                 goto rss_sub_policy_error;
17089                         }
17090                         act_cnt = &mtr_policy->act_cnt[i];
17091                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
17092                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
17093                                 if (act_cnt->rix_mark)
17094                                         wks->mark = 1;
17095                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
17096                                 dh.rix_hrxq = hrxq_idx[i];
17097                                 flow_drv_rxq_flags_set(dev, &dh);
17098                         }
17099                 }
17100         }
17101         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
17102                                                sub_policy, domain)) {
17103                 DRV_LOG(ERR, "Failed to create policy "
17104                         "rules for ingress domain.");
17105                 goto rss_sub_policy_error;
17106         }
17107         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17108                 i = (mtr_policy->sub_policy_num >>
17109                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17110                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17111                 if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
17112                         DRV_LOG(ERR, "No free sub-policy slot.");
17113                         goto rss_sub_policy_error;
17114                 }
17115                 mtr_policy->sub_policys[domain][i] = sub_policy;
17116                 i++;
17117                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17118                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17119                 mtr_policy->sub_policy_num |=
17120                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17121                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17122         }
17123         rte_spinlock_unlock(&mtr_policy->sl);
17124         *is_reuse = false;
17125         return sub_policy;
17126 rss_sub_policy_error:
17127         if (sub_policy) {
17128                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17129                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17130                         i = (mtr_policy->sub_policy_num >>
17131                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17132                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17133                         mtr_policy->sub_policys[domain][i] = NULL;
17134                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17135                                         sub_policy->idx);
17136                 }
17137         }
17138         rte_spinlock_unlock(&mtr_policy->sl);
17139         return NULL;
17140 }
17141
17142 /**
17143  * Find the policy table for prefix table with RSS.
17144  *
17145  * @param[in] dev
17146  *   Pointer to Ethernet device.
17147  * @param[in] mtr_policy
17148  *   Pointer to meter policy table.
17149  * @param[in] rss_desc
17150  *   Pointer to rss_desc.
17151  * @return
17152  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
17153  */
17154 static struct mlx5_flow_meter_sub_policy *
17155 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
17156                 struct mlx5_flow_meter_policy *mtr_policy,
17157                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
17158 {
17159         struct mlx5_priv *priv = dev->data->dev_private;
17160         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17161         struct mlx5_flow_meter_info *next_fm;
17162         struct mlx5_flow_meter_policy *next_policy;
17163         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
17164         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
17165         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
17166         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17167         bool reuse_sub_policy;
17168         uint32_t i = 0;
17169         uint32_t j = 0;
17170
17171         while (true) {
17172                 /* Iterate hierarchy to get all policies in this hierarchy. */
17173                 policies[i++] = mtr_policy;
17174                 if (!mtr_policy->is_hierarchy)
17175                         break;
17176                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
17177                         DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
17178                         return NULL;
17179                 }
17180                 next_fm = mlx5_flow_meter_find(priv,
17181                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17182                 if (!next_fm) {
17183                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
17184                         return NULL;
17185                 }
17186                 next_policy =
17187                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
17188                                                     NULL);
17189                 MLX5_ASSERT(next_policy);
17190                 mtr_policy = next_policy;
17191         }
17192         while (i) {
17193                 /*
17194                  * From last policy to the first one in hierarchy,
17195                  * create / get the sub policy for each of them.
17196                  */
17197                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
17198                                                         policies[--i],
17199                                                         rss_desc,
17200                                                         next_sub_policy,
17201                                                         &reuse_sub_policy);
17202                 if (!sub_policy) {
17203                         DRV_LOG(ERR, "Failed to get the sub policy.");
17204                         goto err_exit;
17205                 }
17206                 if (!reuse_sub_policy)
17207                         sub_policies[j++] = sub_policy;
17208                 next_sub_policy = sub_policy;
17209         }
17210         return sub_policy;
17211 err_exit:
17212         while (j) {
17213                 uint16_t sub_policy_num;
17214
17215                 sub_policy = sub_policies[--j];
17216                 mtr_policy = sub_policy->main_policy;
17217                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17218                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17219                         sub_policy_num = (mtr_policy->sub_policy_num >>
17220                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17221                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
17222                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
17223                                                                         NULL;
17224                         sub_policy_num--;
17225                         mtr_policy->sub_policy_num &=
17226                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17227                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17228                         mtr_policy->sub_policy_num |=
17229                                 (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17230                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17231                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17232                                         sub_policy->idx);
17233                 }
17234         }
17235         return NULL;
17236 }
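
      /*
       * The prepare routine above walks the meter hierarchy in two passes:
       * it first collects every policy from the head meter down (bounded by
       * MLX5_MTR_CHAIN_MAX_NUM), then creates or reuses sub-policies from
       * the tail back to the head so that each level can jump to the table
       * of the level below it.
       */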
17237
17238 /**
17239  * Create the sub policy tag rule for all meters in hierarchy.
17240  *
17241  * @param[in] dev
17242  *   Pointer to Ethernet device.
17243  * @param[in] fm
17244  *   Meter information table.
17245  * @param[in] src_port
17246  *   The src port this extra rule should use.
17247  * @param[in] item
17248  *   The src port match item.
17249  * @param[out] error
17250  *   Perform verbose error reporting if not NULL.
17251  * @return
17252  *   0 on success, a negative errno value otherwise and rte_errno is set.
17253  */
17254 static int
17255 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
17256                                 struct mlx5_flow_meter_info *fm,
17257                                 int32_t src_port,
17258                                 const struct rte_flow_item *item,
17259                                 struct rte_flow_error *error)
17260 {
17261         struct mlx5_priv *priv = dev->data->dev_private;
17262         struct mlx5_flow_meter_policy *mtr_policy;
17263         struct mlx5_flow_meter_sub_policy *sub_policy;
17264         struct mlx5_flow_meter_info *next_fm = NULL;
17265         struct mlx5_flow_meter_policy *next_policy;
17266         struct mlx5_flow_meter_sub_policy *next_sub_policy;
17267         struct mlx5_flow_tbl_data_entry *tbl_data;
17268         struct mlx5_sub_policy_color_rule *color_rule;
17269         struct mlx5_meter_policy_acts acts;
17270         uint32_t color_reg_c_idx;
	bool mtr_first = (src_port != UINT16_MAX);
17272         struct rte_flow_attr attr = {
17273                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
17274                 .priority = 0,
17275                 .ingress = 0,
17276                 .egress = 0,
17277                 .transfer = 1,
17278                 .reserved = 0,
17279         };
17280         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
17281         int i;
17282
17283         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17284         MLX5_ASSERT(mtr_policy);
17285         if (!mtr_policy->is_hierarchy)
17286                 return 0;
17287         next_fm = mlx5_flow_meter_find(priv,
17288                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17289         if (!next_fm) {
17290                 return rte_flow_error_set(error, EINVAL,
17291                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
17292                                 "Failed to find next meter in hierarchy.");
17293         }
17294         if (!next_fm->drop_cnt)
17295                 goto exit;
17296         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
17297         sub_policy = mtr_policy->sub_policys[domain][0];
17298         for (i = 0; i < RTE_COLORS; i++) {
17299                 bool rule_exist = false;
17300                 struct mlx5_meter_policy_action_container *act_cnt;
17301
17302                 if (i >= RTE_COLOR_YELLOW)
17303                         break;
17304                 TAILQ_FOREACH(color_rule,
17305                               &sub_policy->color_rules[i], next_port)
17306                         if (color_rule->src_port == src_port) {
17307                                 rule_exist = true;
17308                                 break;
17309                         }
17310                 if (rule_exist)
17311                         continue;
17312                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
17313                                 sizeof(struct mlx5_sub_policy_color_rule),
17314                                 0, SOCKET_ID_ANY);
17315                 if (!color_rule)
17316                         return rte_flow_error_set(error, ENOMEM,
17317                                 RTE_FLOW_ERROR_TYPE_ACTION,
17318                                 NULL, "No memory to create tag color rule.");
17319                 color_rule->src_port = src_port;
17320                 attr.priority = i;
17321                 next_policy = mlx5_flow_meter_policy_find(dev,
17322                                                 next_fm->policy_id, NULL);
17323                 MLX5_ASSERT(next_policy);
17324                 next_sub_policy = next_policy->sub_policys[domain][0];
17325                 tbl_data = container_of(next_sub_policy->tbl_rsc,
17326                                         struct mlx5_flow_tbl_data_entry, tbl);
17327                 act_cnt = &mtr_policy->act_cnt[i];
17328                 if (mtr_first) {
17329                         acts.dv_actions[0] = next_fm->meter_action;
17330                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
17331                 } else {
17332                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
17333                         acts.dv_actions[1] = next_fm->meter_action;
17334                 }
17335                 acts.dv_actions[2] = tbl_data->jump.action;
17336                 acts.actions_n = 3;
17337                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
17338                         next_fm = NULL;
17339                         goto err_exit;
17340                 }
17341                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
17342                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
17343                                 &attr, true, item,
17344                                 &color_rule->matcher, error)) {
17345                         rte_flow_error_set(error, errno,
17346                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17347                                 "Failed to create hierarchy meter matcher.");
17348                         goto err_exit;
17349                 }
17350                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
17351                                         (enum rte_color)i,
17352                                         color_rule->matcher->matcher_object,
17353                                         acts.actions_n, acts.dv_actions,
17354                                         true, item,
17355                                         &color_rule->rule, &attr)) {
17356                         rte_flow_error_set(error, errno,
17357                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17358                                 "Failed to create hierarchy meter rule.");
17359                         goto err_exit;
17360                 }
17361                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
17362                                   color_rule, next_port);
17363         }
17364 exit:
	/*
	 * Recurse to iterate over all meters in the hierarchy and create
	 * the rules needed for each of them.
	 */
17369         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
17370                                                 src_port, item, error);
17371 err_exit:
17372         if (color_rule) {
17373                 if (color_rule->rule)
17374                         mlx5_flow_os_destroy_flow(color_rule->rule);
17375                 if (color_rule->matcher) {
17376                         struct mlx5_flow_tbl_data_entry *tbl =
17377                                 container_of(color_rule->matcher->tbl,
17378                                                 typeof(*tbl), tbl);
17379                         mlx5_list_unregister(tbl->matchers,
17380                                                 &color_rule->matcher->entry);
17381                 }
17382                 mlx5_free(color_rule);
17383         }
17384         if (next_fm)
17385                 mlx5_flow_meter_detach(priv, next_fm);
17386         return -rte_errno;
17387 }
17388
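/*
 * Illustrative sketch only: the recursion in the function above follows
 * the GREEN-color "next meter" link until a non-hierarchy policy is
 * reached. An iterative form of the same walk, with a hypothetical name:
 */
static inline struct mlx5_flow_meter_info *
mtr_hierarchy_last_meter(struct rte_eth_dev *dev,
			 struct mlx5_flow_meter_info *fm)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_policy *policy;

	while (fm != NULL) {
		policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
		if (policy == NULL || !policy->is_hierarchy)
			break;
		/* Hierarchy links are defined on the GREEN color only. */
		fm = mlx5_flow_meter_find(priv,
			policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
	}
	return fm;
}
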
17389 /**
 * Destroy the sub-policy tables that use Rx queues.
17391  *
17392  * @param[in] dev
17393  *   Pointer to Ethernet device.
17394  * @param[in] mtr_policy
17395  *   Pointer to meter policy table.
17396  */
17397 static void
17398 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17399                                     struct mlx5_flow_meter_policy *mtr_policy)
17400 {
17401         struct mlx5_priv *priv = dev->data->dev_private;
17402         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17403         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17404         uint32_t i, j;
17405         uint16_t sub_policy_num, new_policy_num;
17406
17407         rte_spinlock_lock(&mtr_policy->sl);
17408         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17409                 switch (mtr_policy->act_cnt[i].fate_action) {
17410                 case MLX5_FLOW_FATE_SHARED_RSS:
17411                         sub_policy_num = (mtr_policy->sub_policy_num >>
17412                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17413                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17414                         new_policy_num = sub_policy_num;
			for (j = 0; j < sub_policy_num; j++) {
				sub_policy =
					mtr_policy->sub_policys[domain][j];
				if (!sub_policy)
					continue;
				__flow_dv_destroy_sub_policy_rules(dev,
								   sub_policy);
				if (sub_policy !=
				    mtr_policy->sub_policys[domain][0]) {
					mtr_policy->sub_policys[domain][j] =
									NULL;
					mlx5_ipool_free(priv->sh->ipool
						[MLX5_IPOOL_MTR_POLICY],
						sub_policy->idx);
					new_policy_num--;
				}
			}
17432                         if (new_policy_num != sub_policy_num) {
17433                                 mtr_policy->sub_policy_num &=
17434                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17435                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17436                                 mtr_policy->sub_policy_num |=
17437                                 (new_policy_num &
17438                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17439                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17440                         }
17441                         break;
17442                 case MLX5_FLOW_FATE_QUEUE:
17443                         sub_policy = mtr_policy->sub_policys[domain][0];
17444                         __flow_dv_destroy_sub_policy_rules(dev,
17445                                                            sub_policy);
17446                         break;
17447                 default:
			/* Other actions have no queue; nothing to do. */
17449                         break;
17450                 }
17451         }
17452         rte_spinlock_unlock(&mtr_policy->sl);
}

/**
17455  * Check whether the DR drop action is supported on the root table or not.
17456  *
17457  * Create a simple flow with DR drop action on root table to validate
17458  * if DR drop action on root table is supported or not.
17459  *
17460  * @param[in] dev
17461  *   Pointer to rte_eth_dev structure.
17462  *
17463  * @return
17464  *   0 on success, a negative errno value otherwise and rte_errno is set.
17465  */
17466 int
17467 mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
17468 {
17469         struct mlx5_priv *priv = dev->data->dev_private;
17470         struct mlx5_dev_ctx_shared *sh = priv->sh;
17471         struct mlx5_flow_dv_match_params mask = {
17472                 .size = sizeof(mask.buf),
17473         };
17474         struct mlx5_flow_dv_match_params value = {
17475                 .size = sizeof(value.buf),
17476         };
17477         struct mlx5dv_flow_matcher_attr dv_attr = {
17478                 .type = IBV_FLOW_ATTR_NORMAL,
17479                 .priority = 0,
17480                 .match_criteria_enable = 0,
17481                 .match_mask = (void *)&mask,
17482         };
17483         struct mlx5_flow_tbl_resource *tbl = NULL;
17484         void *matcher = NULL;
17485         void *flow = NULL;
17486         int ret = -1;
17487
17488         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
17489                                         0, 0, 0, NULL);
17490         if (!tbl)
17491                 goto err;
17492         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17493         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17494         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17495                                                tbl->obj, &matcher);
17496         if (ret)
17497                 goto err;
17498         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17499         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17500                                        &sh->dr_drop_action, &flow);
17501 err:
	/*
	 * If the DR drop action is not supported on the root table, flow
	 * creation fails with EOPNOTSUPP or EPROTONOSUPPORT.
	 */
17506         if (!flow) {
17507                 if (matcher &&
17508                     (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
17509                         DRV_LOG(INFO, "DR drop action is not supported in root table.");
17510                 else
17511                         DRV_LOG(ERR, "Unexpected error in DR drop action support detection");
17512                 ret = -1;
17513         } else {
17514                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17515         }
17516         if (matcher)
17517                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17518         if (tbl)
17519                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17520         return ret;
17521 }
17522
17523 /**
17524  * Validate the batch counter support in root table.
17525  *
 * Create a simple flow with an invalid counter offset on the root table to
 * validate whether batch counters with offset are supported there or not.
17528  *
17529  * @param[in] dev
17530  *   Pointer to rte_eth_dev structure.
17531  *
17532  * @return
17533  *   0 on success, a negative errno value otherwise and rte_errno is set.
17534  */
17535 int
17536 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17537 {
17538         struct mlx5_priv *priv = dev->data->dev_private;
17539         struct mlx5_dev_ctx_shared *sh = priv->sh;
17540         struct mlx5_flow_dv_match_params mask = {
17541                 .size = sizeof(mask.buf),
17542         };
17543         struct mlx5_flow_dv_match_params value = {
17544                 .size = sizeof(value.buf),
17545         };
17546         struct mlx5dv_flow_matcher_attr dv_attr = {
17547                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17548                 .priority = 0,
17549                 .match_criteria_enable = 0,
17550                 .match_mask = (void *)&mask,
17551         };
17552         void *actions[2] = { 0 };
17553         struct mlx5_flow_tbl_resource *tbl = NULL;
17554         struct mlx5_devx_obj *dcs = NULL;
17555         void *matcher = NULL;
17556         void *flow = NULL;
17557         int ret = -1;
17558
17559         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17560                                         0, 0, 0, NULL);
17561         if (!tbl)
17562                 goto err;
17563         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
17564         if (!dcs)
17565                 goto err;
17566         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17567                                                     &actions[0]);
17568         if (ret)
17569                 goto err;
17570         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17571         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17572         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17573                                                tbl->obj, &matcher);
17574         if (ret)
17575                 goto err;
17576         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17577         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17578                                        actions, &flow);
17579 err:
	/*
	 * If a batch counter with an offset is not supported, the driver
	 * does not validate the invalid offset value and flow creation
	 * succeeds. In that case, batch counters are not supported on the
	 * root table.
	 *
	 * Otherwise, if flow creation fails, the counter offset is supported.
	 */
17587         if (flow) {
17588                 DRV_LOG(INFO, "Batch counter is not supported in root "
17589                               "table. Switch to fallback mode.");
17590                 rte_errno = ENOTSUP;
17591                 ret = -rte_errno;
17592                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17593         } else {
17594                 /* Check matcher to make sure validate fail at flow create. */
		if (!matcher || errno != EINVAL)
17596                         DRV_LOG(ERR, "Unexpected error in counter offset "
17597                                      "support detection");
17598                 ret = 0;
17599         }
17600         if (actions[0])
17601                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17602         if (matcher)
17603                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17604         if (tbl)
17605                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17606         if (dcs)
17607                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17608         return ret;
17609 }
17610
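/*
 * Illustrative sketch only: both discovery routines above probe a
 * capability by attempting to create a throwaway flow. A start-up caller
 * could cache the results as below (hypothetical helper and flags; the
 * real driver stores the outcome in its shared context):
 */
static inline void
probe_root_table_caps(struct rte_eth_dev *dev,
		      bool *dr_drop, bool *counter_offset)
{
	/* Each probe returns 0 when the capability is present. */
	*dr_drop = mlx5_flow_discover_dr_action_support(dev) == 0;
	*counter_offset =
		mlx5_flow_dv_discover_counter_offset_support(dev) == 0;
}
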
17611 /**
17612  * Query a devx counter.
17613  *
17614  * @param[in] dev
17615  *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index of the flow counter.
 * @param[in] clear
 *   Set to clear the counter statistics.
 * @param[out] pkts
 *   The statistics value of packets.
 * @param[out] bytes
 *   The statistics value of bytes.
 * @param[out] action
 *   Pointer to store the counter action, filled only if not NULL.
 *
17625  * @return
17626  *   0 on success, otherwise return -1.
17627  */
17628 static int
17629 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17630                       uint64_t *pkts, uint64_t *bytes, void **action)
17631 {
17632         struct mlx5_priv *priv = dev->data->dev_private;
17633         struct mlx5_flow_counter *cnt;
17634         uint64_t inn_pkts, inn_bytes;
17635         int ret;
17636
17637         if (!priv->sh->cdev->config.devx)
17638                 return -1;
17639
17640         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17641         if (ret)
17642                 return -1;
	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
	if (cnt == NULL)
		return -1;
	if (action)
		*action = cnt->action;
	*pkts = inn_pkts - cnt->hits;
	*bytes = inn_bytes - cnt->bytes;
17649         if (clear) {
17650                 cnt->hits = inn_pkts;
17651                 cnt->bytes = inn_bytes;
17652         }
17653         return 0;
17654 }
17655
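/*
 * Illustrative sketch only: the query above reports deltas against a
 * software baseline ("hits"/"bytes") instead of raw hardware values, so
 * clearing a counter merely advances the baseline. The names below are
 * hypothetical:
 */
struct cnt_baseline {
	uint64_t hits;	/* Packet baseline taken at the last clear. */
	uint64_t bytes;	/* Byte baseline taken at the last clear. */
};

static inline void
cnt_read_delta(struct cnt_baseline *b, uint64_t hw_pkts, uint64_t hw_bytes,
	       bool clear, uint64_t *pkts, uint64_t *bytes)
{
	*pkts = hw_pkts - b->hits;
	*bytes = hw_bytes - b->bytes;
	if (clear) {
		/* A clear is purely software: move the baseline forward. */
		b->hits = hw_pkts;
		b->bytes = hw_bytes;
	}
}
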
17656 /**
17657  * Get aged-out flows.
17658  *
17659  * @param[in] dev
17660  *   Pointer to the Ethernet device structure.
17661  * @param[in] context
 *   The address of an array of pointers to the aged-out flow contexts.
 * @param[in] nb_contexts
 *   The length of the context array.
17665  * @param[out] error
17666  *   Perform verbose error reporting if not NULL. Initialized in case of
17667  *   error only.
17668  *
17669  * @return
 *   The number of contexts reported on success, otherwise a negative
 *   errno value. If nb_contexts is 0, return the total number of aged
 *   contexts. Otherwise, return the number of aged flows reported in
 *   the context array.
17675  */
17676 static int
17677 flow_dv_get_aged_flows(struct rte_eth_dev *dev,
17678                     void **context,
17679                     uint32_t nb_contexts,
17680                     struct rte_flow_error *error)
17681 {
17682         struct mlx5_priv *priv = dev->data->dev_private;
17683         struct mlx5_age_info *age_info;
17684         struct mlx5_age_param *age_param;
17685         struct mlx5_flow_counter *counter;
17686         struct mlx5_aso_age_action *act;
17687         int nb_flows = 0;
17688
17689         if (nb_contexts && !context)
17690                 return rte_flow_error_set(error, EINVAL,
17691                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17692                                           NULL, "empty context");
17693         age_info = GET_PORT_AGE_INFO(priv);
17694         rte_spinlock_lock(&age_info->aged_sl);
17695         LIST_FOREACH(act, &age_info->aged_aso, next) {
17696                 nb_flows++;
17697                 if (nb_contexts) {
17698                         context[nb_flows - 1] =
17699                                                 act->age_params.context;
17700                         if (!(--nb_contexts))
17701                                 break;
17702                 }
17703         }
17704         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17705                 nb_flows++;
17706                 if (nb_contexts) {
17707                         age_param = MLX5_CNT_TO_AGE(counter);
17708                         context[nb_flows - 1] = age_param->context;
17709                         if (!(--nb_contexts))
17710                                 break;
17711                 }
17712         }
17713         rte_spinlock_unlock(&age_info->aged_sl);
17714         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17715         return nb_flows;
17716 }
17717
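/*
 * Illustrative sketch only: the handler above backs the generic
 * rte_flow_get_aged_flows() API, which is meant to be driven in two
 * calls - one to size the array, one to fill it. Hypothetical helper,
 * error handling elided:
 */
static inline int
drain_aged_flow_contexts(uint16_t port_id, struct rte_flow_error *error)
{
	void **ctx;
	int n = rte_flow_get_aged_flows(port_id, NULL, 0, error);

	if (n <= 0)
		return n;
	ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ctx) * n, 0, SOCKET_ID_ANY);
	if (ctx == NULL)
		return -ENOMEM;
	n = rte_flow_get_aged_flows(port_id, ctx, (uint32_t)n, error);
	/* ... destroy or recycle the flow behind each returned context ... */
	mlx5_free(ctx);
	return n;
}
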
17718 /*
17719  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17720  */
17721 static uint32_t
17722 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17723 {
17724         return flow_dv_counter_alloc(dev, 0);
17725 }
17726
17727 /**
17728  * Validate indirect action.
17729  * Dispatcher for action type specific validation.
17730  *
17731  * @param[in] dev
17732  *   Pointer to the Ethernet device structure.
17733  * @param[in] conf
17734  *   Indirect action configuration.
17735  * @param[in] action
17736  *   The indirect action object to validate.
 * @param[out] err
17738  *   Perform verbose error reporting if not NULL. Initialized in case of
17739  *   error only.
17740  *
17741  * @return
17742  *   0 on success, otherwise negative errno value.
17743  */
17744 int
17745 flow_dv_action_validate(struct rte_eth_dev *dev,
17746                         const struct rte_flow_indir_action_conf *conf,
17747                         const struct rte_flow_action *action,
17748                         struct rte_flow_error *err)
17749 {
17750         struct mlx5_priv *priv = dev->data->dev_private;
17751
17752         RTE_SET_USED(conf);
17753         switch (action->type) {
17754         case RTE_FLOW_ACTION_TYPE_RSS:
		/*
		 * priv->obj_ops is set according to driver capabilities.
		 * When DevX capabilities are sufficient, it is set to
		 * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
		 * ibv_obj_ops doesn't support the ind_table_modify
		 * operation, so the indirect RSS action can't be used.
		 */
17763                 if (priv->obj_ops.ind_table_modify == NULL)
17764                         return rte_flow_error_set
17765                                         (err, ENOTSUP,
17766                                          RTE_FLOW_ERROR_TYPE_ACTION,
17767                                          NULL,
17768                                          "Indirect RSS action not supported");
17769                 return mlx5_validate_action_rss(dev, action, err);
17770         case RTE_FLOW_ACTION_TYPE_AGE:
17771                 if (!priv->sh->aso_age_mng)
17772                         return rte_flow_error_set(err, ENOTSUP,
17773                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17774                                                 NULL,
17775                                                 "Indirect age action not supported");
17776                 return flow_dv_validate_action_age(0, action, dev, err);
17777         case RTE_FLOW_ACTION_TYPE_COUNT:
17778                 return flow_dv_validate_action_count(dev, true, 0, NULL, err);
17779         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17780                 if (!priv->sh->ct_aso_en)
17781                         return rte_flow_error_set(err, ENOTSUP,
17782                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17783                                         "ASO CT is not supported");
17784                 return mlx5_validate_action_ct(dev, action->conf, err);
17785         default:
17786                 return rte_flow_error_set(err, ENOTSUP,
17787                                           RTE_FLOW_ERROR_TYPE_ACTION,
17788                                           NULL,
17789                                           "action type not supported");
17790         }
17791 }
17792
/**
17794  * Check if the RSS configurations for colors of a meter policy match
17795  * each other, except the queues.
17796  *
17797  * @param[in] r1
17798  *   Pointer to the first RSS flow action.
17799  * @param[in] r2
17800  *   Pointer to the second RSS flow action.
17801  *
17802  * @return
17803  *   0 on match, 1 on conflict.
17804  */
17805 static inline int
17806 flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
17807                                const struct rte_flow_action_rss *r2)
17808 {
17809         if (r1 == NULL || r2 == NULL)
17810                 return 0;
17811         if (!(r1->level <= 1 && r2->level <= 1) &&
17812             !(r1->level > 1 && r2->level > 1))
17813                 return 1;
17814         if (r1->types != r2->types &&
17815             !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
17816               (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
17817                 return 1;
17818         if (r1->key || r2->key) {
17819                 const void *key1 = r1->key ? r1->key : rss_hash_default_key;
17820                 const void *key2 = r2->key ? r2->key : rss_hash_default_key;
17821
17822                 if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
17823                         return 1;
17824         }
17825         return 0;
17826 }
17827
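/*
 * Illustrative sketch only: the contract above allows the GREEN and
 * YELLOW RSS configurations to differ in their queue lists while types,
 * level and key must match. Hypothetical configurations:
 */
static inline int
mtr_rss_pair_is_compatible(void)
{
	static const uint16_t q_green[] = { 0, 1 };
	static const uint16_t q_yellow[] = { 2, 3 };
	const struct rte_flow_action_rss green = {
		.types = RTE_ETH_RSS_IP,
		.queue = q_green,
		.queue_num = RTE_DIM(q_green),
	};
	const struct rte_flow_action_rss yellow = {
		.types = RTE_ETH_RSS_IP,
		.queue = q_yellow,
		.queue_num = RTE_DIM(q_yellow),
	};

	/* Same types/level/key, different queues: no conflict expected. */
	return flow_dv_mtr_policy_rss_compare(&green, &yellow) == 0;
}
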
17828 /**
17829  * Validate the meter hierarchy chain for meter policy.
17830  *
17831  * @param[in] dev
17832  *   Pointer to the Ethernet device structure.
17833  * @param[in] meter_id
17834  *   Meter id.
17835  * @param[in] action_flags
17836  *   Holds the actions detected until now.
17837  * @param[out] is_rss
17838  *   Is RSS or not.
17839  * @param[out] hierarchy_domain
17840  *   The domain bitmap for hierarchy policy.
17841  * @param[out] error
17842  *   Perform verbose error reporting if not NULL. Initialized in case of
17843  *   error only.
17844  *
17845  * @return
17846  *   0 on success, otherwise negative errno value with error set.
17847  */
17848 static int
17849 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17850                                   uint32_t meter_id,
17851                                   uint64_t action_flags,
17852                                   bool *is_rss,
17853                                   uint8_t *hierarchy_domain,
17854                                   struct rte_mtr_error *error)
17855 {
17856         struct mlx5_priv *priv = dev->data->dev_private;
17857         struct mlx5_flow_meter_info *fm;
17858         struct mlx5_flow_meter_policy *policy;
17859         uint8_t cnt = 1;
17860
17861         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17862                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17863                 return -rte_mtr_error_set(error, EINVAL,
17864                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17865                                         NULL,
17866                                         "Multiple fate actions not supported.");
17867         *hierarchy_domain = 0;
17868         while (true) {
17869                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17870                 if (!fm)
17871                         return -rte_mtr_error_set(error, EINVAL,
17872                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17873                                         "Meter not found in meter hierarchy.");
17874                 if (fm->def_policy)
17875                         return -rte_mtr_error_set(error, EINVAL,
17876                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17877                         "Non termination meter not supported in hierarchy.");
17878                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17879                 MLX5_ASSERT(policy);
		/*
		 * Only inherit the supported domains from the first meter
		 * in the hierarchy.
		 * One meter supports at least one domain.
		 */
17885                 if (!*hierarchy_domain) {
17886                         if (policy->transfer)
17887                                 *hierarchy_domain |=
17888                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17889                         if (policy->ingress)
17890                                 *hierarchy_domain |=
17891                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17892                         if (policy->egress)
17893                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17894                 }
17895                 if (!policy->is_hierarchy) {
17896                         *is_rss = policy->is_rss;
17897                         break;
17898                 }
17899                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17900                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17901                         return -rte_mtr_error_set(error, EINVAL,
17902                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17903                                         "Exceed max hierarchy meter number.");
17904         }
17905         return 0;
17906 }
17907
17908 /**
17909  * Validate meter policy actions.
17910  * Dispatcher for action type specific validation.
17911  *
17912  * @param[in] dev
17913  *   Pointer to the Ethernet device structure.
 * @param[in] actions
 *   The meter policy action lists to validate, one per color.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] is_rss
 *   Set to true when an RSS action is used.
 * @param[out] domain_bitmap
 *   The bitmap of steering domains supported by the policy.
 * @param[out] policy_mode
 *   The detected policy mode (default, green-only, yellow-only or both).
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
17921  *
17922  * @return
17923  *   0 on success, otherwise negative errno value.
17924  */
17925 static int
17926 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17927                         const struct rte_flow_action *actions[RTE_COLORS],
17928                         struct rte_flow_attr *attr,
17929                         bool *is_rss,
17930                         uint8_t *domain_bitmap,
17931                         uint8_t *policy_mode,
17932                         struct rte_mtr_error *error)
17933 {
17934         struct mlx5_priv *priv = dev->data->dev_private;
17935         struct mlx5_sh_config *dev_conf = &priv->sh->config;
17936         const struct rte_flow_action *act;
17937         uint64_t action_flags[RTE_COLORS] = {0};
17938         int actions_n;
17939         int i, ret;
17940         struct rte_flow_error flow_err;
17941         uint8_t domain_color[RTE_COLORS] = {0};
17942         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17943         uint8_t hierarchy_domain = 0;
17944         const struct rte_flow_action_meter *mtr;
17945         bool def_green = false;
17946         bool def_yellow = false;
17947         const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
17948
17949         if (!dev_conf->dv_esw_en)
17950                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17951         *domain_bitmap = def_domain;
	/* The red color can only support the DROP action. */
17953         if (!actions[RTE_COLOR_RED] ||
17954             actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17955                 return -rte_mtr_error_set(error, ENOTSUP,
17956                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17957                                 NULL, "Red color only supports drop action.");
17958         /*
17959          * Check default policy actions:
17960          * Green / Yellow: no action, Red: drop action
17961          * Either G or Y will trigger default policy actions to be created.
17962          */
17963         if (!actions[RTE_COLOR_GREEN] ||
17964             actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
17965                 def_green = true;
17966         if (!actions[RTE_COLOR_YELLOW] ||
17967             actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
17968                 def_yellow = true;
17969         if (def_green && def_yellow) {
17970                 *policy_mode = MLX5_MTR_POLICY_MODE_DEF;
17971                 return 0;
17972         } else if (!def_green && def_yellow) {
17973                 *policy_mode = MLX5_MTR_POLICY_MODE_OG;
17974         } else if (def_green && !def_yellow) {
17975                 *policy_mode = MLX5_MTR_POLICY_MODE_OY;
17976         } else {
17977                 *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
17978         }
	/* Initialize to an empty string in case the message is never set. */
17980         flow_err.message = "";
17981         for (i = 0; i < RTE_COLORS; i++) {
17982                 act = actions[i];
17983                 for (action_flags[i] = 0, actions_n = 0;
17984                      act && act->type != RTE_FLOW_ACTION_TYPE_END;
17985                      act++) {
17986                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17987                                 return -rte_mtr_error_set(error, ENOTSUP,
17988                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17989                                           NULL, "too many actions");
17990                         switch (act->type) {
17991                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17992                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
17993                                 if (!dev_conf->dv_esw_en)
17994                                         return -rte_mtr_error_set(error,
17995                                         ENOTSUP,
17996                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "PORT action validation failed"
					" because E-Switch is disabled");
17999                                 ret = flow_dv_validate_action_port_id(dev,
18000                                                 action_flags[i],
18001                                                 act, attr, &flow_err);
18002                                 if (ret)
18003                                         return -rte_mtr_error_set(error,
18004                                         ENOTSUP,
18005                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18006                                         NULL, flow_err.message ?
18007                                         flow_err.message :
18008                                         "PORT action validate check fail");
18009                                 ++actions_n;
18010                                 action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
18011                                 break;
18012                         case RTE_FLOW_ACTION_TYPE_MARK:
18013                                 ret = flow_dv_validate_action_mark(dev, act,
18014                                                            action_flags[i],
18015                                                            attr, &flow_err);
18016                                 if (ret < 0)
18017                                         return -rte_mtr_error_set(error,
18018                                         ENOTSUP,
18019                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18020                                         NULL, flow_err.message ?
18021                                         flow_err.message :
18022                                         "Mark action validate check fail");
18023                                 if (dev_conf->dv_xmeta_en !=
18024                                         MLX5_XMETA_MODE_LEGACY)
18025                                         return -rte_mtr_error_set(error,
18026                                         ENOTSUP,
18027                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "Extended MARK action is "
					"not supported. Please try using "
					"the default policy for the meter.");
18031                                 action_flags[i] |= MLX5_FLOW_ACTION_MARK;
18032                                 ++actions_n;
18033                                 break;
18034                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
18035                                 ret = flow_dv_validate_action_set_tag(dev,
18036                                                         act, action_flags[i],
18037                                                         attr, &flow_err);
18038                                 if (ret)
18039                                         return -rte_mtr_error_set(error,
18040                                         ENOTSUP,
18041                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18042                                         NULL, flow_err.message ?
18043                                         flow_err.message :
18044                                         "Set tag action validate check fail");
18045                                 action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
18046                                 ++actions_n;
18047                                 break;
18048                         case RTE_FLOW_ACTION_TYPE_DROP:
18049                                 ret = mlx5_flow_validate_action_drop
18050                                         (action_flags[i], attr, &flow_err);
18051                                 if (ret < 0)
18052                                         return -rte_mtr_error_set(error,
18053                                         ENOTSUP,
18054                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18055                                         NULL, flow_err.message ?
18056                                         flow_err.message :
18057                                         "Drop action validate check fail");
18058                                 action_flags[i] |= MLX5_FLOW_ACTION_DROP;
18059                                 ++actions_n;
18060                                 break;
18061                         case RTE_FLOW_ACTION_TYPE_QUEUE:
18062                                 /*
18063                                  * Check whether extensive
18064                                  * metadata feature is engaged.
18065                                  */
18066                                 if (dev_conf->dv_flow_en &&
18067                                     (dev_conf->dv_xmeta_en !=
18068                                      MLX5_XMETA_MODE_LEGACY) &&
18069                                     mlx5_flow_ext_mreg_supported(dev))
18070                                         return -rte_mtr_error_set(error,
18071                                           ENOTSUP,
18072                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "Queue action with meta "
					  "is not supported. Please try using "
					  "the default policy for the meter.");
18076                                 ret = mlx5_flow_validate_action_queue(act,
18077                                                         action_flags[i], dev,
18078                                                         attr, &flow_err);
18079                                 if (ret < 0)
18080                                         return -rte_mtr_error_set(error,
18081                                           ENOTSUP,
18082                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18083                                           NULL, flow_err.message ?
18084                                           flow_err.message :
18085                                           "Queue action validate check fail");
18086                                 action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
18087                                 ++actions_n;
18088                                 break;
18089                         case RTE_FLOW_ACTION_TYPE_RSS:
18090                                 if (dev_conf->dv_flow_en &&
18091                                     (dev_conf->dv_xmeta_en !=
18092                                      MLX5_XMETA_MODE_LEGACY) &&
18093                                     mlx5_flow_ext_mreg_supported(dev))
18094                                         return -rte_mtr_error_set(error,
18095                                           ENOTSUP,
18096                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "RSS action with meta "
					  "is not supported. Please try using "
					  "the default policy for the meter.");
18100                                 ret = mlx5_validate_action_rss(dev, act,
18101                                                                &flow_err);
18102                                 if (ret < 0)
18103                                         return -rte_mtr_error_set(error,
18104                                           ENOTSUP,
18105                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18106                                           NULL, flow_err.message ?
18107                                           flow_err.message :
18108                                           "RSS action validate check fail");
18109                                 action_flags[i] |= MLX5_FLOW_ACTION_RSS;
18110                                 ++actions_n;
18111                                 /* Either G or Y will set the RSS. */
18112                                 rss_color[i] = act->conf;
18113                                 break;
18114                         case RTE_FLOW_ACTION_TYPE_JUMP:
18115                                 ret = flow_dv_validate_action_jump(dev,
18116                                         NULL, act, action_flags[i],
18117                                         attr, true, &flow_err);
18118                                 if (ret)
18119                                         return -rte_mtr_error_set(error,
18120                                           ENOTSUP,
18121                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18122                                           NULL, flow_err.message ?
18123                                           flow_err.message :
18124                                           "Jump action validate check fail");
18125                                 ++actions_n;
18126                                 action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
18127                                 break;
			/*
			 * Only the last meter in the hierarchy can support
			 * YELLOW color steering, so a policy whose action
			 * list chains to another meter must not define a
			 * YELLOW policy of its own.
			 */
18133                         case RTE_FLOW_ACTION_TYPE_METER:
18134                                 if (i != RTE_COLOR_GREEN)
18135                                         return -rte_mtr_error_set(error,
18136                                                 ENOTSUP,
18137                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18138                                                 NULL,
18139                                                 "Meter hierarchy only supports GREEN color.");
18140                                 if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
18141                                         return -rte_mtr_error_set(error,
18142                                                 ENOTSUP,
18143                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18144                                                 NULL,
18145                                                 "No yellow policy should be provided in meter hierarchy.");
18146                                 mtr = act->conf;
18147                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
18148                                                         mtr->mtr_id,
18149                                                         action_flags[i],
18150                                                         is_rss,
18151                                                         &hierarchy_domain,
18152                                                         error);
18153                                 if (ret)
18154                                         return ret;
18155                                 ++actions_n;
18156                                 action_flags[i] |=
18157                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
18158                                 break;
18159                         default:
18160                                 return -rte_mtr_error_set(error, ENOTSUP,
18161                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18162                                         NULL,
18163                                         "Doesn't support optional action");
18164                         }
18165                 }
18166                 if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
18167                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
18168                 } else if ((action_flags[i] &
18169                           (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
18170                           (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
18171                         /*
18172                          * Only support MLX5_XMETA_MODE_LEGACY
18173                          * so MARK action is only in ingress domain.
18174                          */
18175                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
18176                 } else {
18177                         domain_color[i] = def_domain;
18178                         if (action_flags[i] &&
18179                             !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18180                                 domain_color[i] &=
18181                                 ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
18182                 }
18183                 if (action_flags[i] &
18184                     MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
18185                         domain_color[i] &= hierarchy_domain;
		/*
		 * Non-termination actions only support the NIC Tx domain.
		 * The adjustment should be skipped when there is no action
		 * or only END is provided. The default domain bit-mask is
		 * kept to find the minimal intersection, and the action
		 * flags checking should also be skipped.
		 */
18193                 if ((def_green && i == RTE_COLOR_GREEN) ||
18194                     (def_yellow && i == RTE_COLOR_YELLOW))
18195                         continue;
18196                 /*
18197                  * Validate the drop action mutual exclusion
18198                  * with other actions. Drop action is mutually-exclusive
18199                  * with any other action, except for Count action.
18200                  */
18201                 if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
18202                     (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
18203                         return -rte_mtr_error_set(error, ENOTSUP,
18204                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18205                                 NULL, "Drop action is mutually-exclusive "
18206                                 "with any other action");
18207                 }
		/* E-Switch has a few restrictions on using items and actions. */
18209                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
18210                         if (!mlx5_flow_ext_mreg_supported(dev) &&
18211                             action_flags[i] & MLX5_FLOW_ACTION_MARK)
18212                                 return -rte_mtr_error_set(error, ENOTSUP,
18213                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18214                                         NULL, "unsupported action MARK");
18215                         if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
18216                                 return -rte_mtr_error_set(error, ENOTSUP,
18217                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18218                                         NULL, "unsupported action QUEUE");
18219                         if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
18220                                 return -rte_mtr_error_set(error, ENOTSUP,
18221                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18222                                         NULL, "unsupported action RSS");
18223                         if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18224                                 return -rte_mtr_error_set(error, ENOTSUP,
18225                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18226                                         NULL, "no fate action is found");
18227                 } else {
18228                         if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
18229                             (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
18230                                 if ((domain_color[i] &
18231                                      MLX5_MTR_DOMAIN_EGRESS_BIT))
18232                                         domain_color[i] =
18233                                                 MLX5_MTR_DOMAIN_EGRESS_BIT;
18234                                 else
18235                                         return -rte_mtr_error_set(error,
18236                                                 ENOTSUP,
18237                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18238                                                 NULL,
18239                                                 "no fate action is found");
18240                         }
18241                 }
18242         }
18243         /* If both colors have RSS, the attributes should be the same. */
18244         if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
18245                                            rss_color[RTE_COLOR_YELLOW]))
18246                 return -rte_mtr_error_set(error, EINVAL,
18247                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18248                                           NULL, "policy RSS attr conflict");
18249         if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
18250                 *is_rss = true;
18251         /* "domain_color[C]" is non-zero for each color, default is ALL. */
18252         if (!def_green && !def_yellow &&
18253             domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
18254             !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
18255             !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
18256                 return -rte_mtr_error_set(error, EINVAL,
18257                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18258                                           NULL, "policy domains conflict");
	/*
	 * Since at least one color policy is listed in the actions, the
	 * domains to be supported are the intersection of both colors.
	 */
18263         *domain_bitmap = domain_color[RTE_COLOR_GREEN] &
18264                          domain_color[RTE_COLOR_YELLOW];
18265         return 0;
18266 }
18267
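/*
 * Illustrative sketch only: a worked example of the final intersection.
 * With a GREEN policy restricted to ingress and a YELLOW policy allowed
 * in both ingress and egress, the resulting bitmap is ingress-only:
 */
static inline uint8_t
mtr_domain_intersect_example(void)
{
	uint8_t green = MLX5_MTR_DOMAIN_INGRESS_BIT;
	uint8_t yellow = MLX5_MTR_DOMAIN_INGRESS_BIT |
			 MLX5_MTR_DOMAIN_EGRESS_BIT;

	/* Yields MLX5_MTR_DOMAIN_INGRESS_BIT. */
	return green & yellow;
}
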
18268 static int
18269 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
18270 {
18271         struct mlx5_priv *priv = dev->data->dev_private;
18272         int ret = 0;
18273
18274         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
18275                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
18276                                                 flags);
18277                 if (ret != 0)
18278                         return ret;
18279         }
18280         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
18281                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
18282                 if (ret != 0)
18283                         return ret;
18284         }
18285         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
18286                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
18287                 if (ret != 0)
18288                         return ret;
18289         }
18290         return 0;
18291 }
18292
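/*
 * Illustrative sketch only: this handler is reached through the public
 * rte_pmd_mlx5_sync_flow() API. A caller flushing every steering domain
 * could look as follows (hypothetical helper):
 */
static inline int
sync_all_flow_domains(uint16_t port_id)
{
	return rte_pmd_mlx5_sync_flow(port_id,
				      MLX5_DOMAIN_BIT_NIC_RX |
				      MLX5_DOMAIN_BIT_NIC_TX |
				      MLX5_DOMAIN_BIT_FDB);
}
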
18293 /**
18294  * Discover the number of available flow priorities
18295  * by trying to create a flow with the highest priority value
18296  * for each possible number.
18297  *
18298  * @param[in] dev
18299  *   Ethernet device.
18300  * @param[in] vprio
18301  *   List of possible number of available priorities.
18302  * @param[in] vprio_n
18303  *   Size of @p vprio array.
18304  * @return
18305  *   On success, number of available flow priorities.
18306  *   On failure, a negative errno-style code and rte_errno is set.
18307  */
18308 static int
18309 flow_dv_discover_priorities(struct rte_eth_dev *dev,
18310                             const uint16_t *vprio, int vprio_n)
18311 {
18312         struct mlx5_priv *priv = dev->data->dev_private;
18313         struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
18314         struct rte_flow_item_eth eth;
18315         struct rte_flow_item item = {
18316                 .type = RTE_FLOW_ITEM_TYPE_ETH,
18317                 .spec = &eth,
18318                 .mask = &eth,
18319         };
18320         struct mlx5_flow_dv_matcher matcher = {
18321                 .mask = {
18322                         .size = sizeof(matcher.mask.buf),
18323                 },
18324         };
18325         union mlx5_flow_tbl_key tbl_key;
18326         struct mlx5_flow flow;
18327         void *action;
18328         struct rte_flow_error error;
18329         uint8_t misc_mask;
18330         int i, err, ret = -ENOTSUP;
18331
18332         /*
18333          * Prepare a flow with a catch-all pattern and a drop action.
18334          * Use drop queue, because shared drop action may be unavailable.
18335          */
18336         action = priv->drop_queue.hrxq->action;
18337         if (action == NULL) {
18338                 DRV_LOG(ERR, "Priority discovery requires a drop action");
18339                 rte_errno = ENOTSUP;
18340                 return -rte_errno;
18341         }
18342         memset(&flow, 0, sizeof(flow));
18343         flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
18344         if (flow.handle == NULL) {
18345                 DRV_LOG(ERR, "Cannot create flow handle");
18346                 rte_errno = ENOMEM;
18347                 return -rte_errno;
18348         }
18349         flow.ingress = true;
18350         flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
18351         flow.dv.actions[0] = action;
18352         flow.dv.actions_n = 1;
18353         memset(&eth, 0, sizeof(eth));
18354         flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
18355                                    &item, /* inner */ false, /* group */ 0);
18356         matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
18357         for (i = 0; i < vprio_n; i++) {
18358                 /* Configure the next proposed maximum priority. */
18359                 matcher.priority = vprio[i] - 1;
18360                 memset(&tbl_key, 0, sizeof(tbl_key));
18361                 err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
18362                                                /* tunnel */ NULL,
18363                                                /* group */ 0,
18364                                                &error);
18365                 if (err != 0) {
18366                         /* Matcher registration is pure SW and must always succeed. */
18367                         DRV_LOG(ERR, "Cannot register matcher");
18368                         ret = -rte_errno;
18369                         break;
18370                 }
18371                 /* Try to apply the flow to HW. */
18372                 misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
18373                 __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
18374                 err = mlx5_flow_os_create_flow
18375                                 (flow.handle->dvh.matcher->matcher_object,
18376                                  (void *)&flow.dv.value, flow.dv.actions_n,
18377                                  flow.dv.actions, &flow.handle->drv_flow);
18378                 if (err == 0) {
18379                         claim_zero(mlx5_flow_os_destroy_flow
18380                                                 (flow.handle->drv_flow));
18381                         flow.handle->drv_flow = NULL;
18382                 }
18383                 claim_zero(flow_dv_matcher_release(dev, flow.handle));
18384                 if (err != 0)
18385                         break;
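                /* HW accepted this priority; remember it and probe the next. */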
18386                 ret = vprio[i];
18387         }
18388         mlx5_ipool_free(pool, flow.handle_idx);
18389         /* Set rte_errno if no candidate priority count was accepted. */
18390         if (ret < 0)
18391                 rte_errno = -ret;
18392         return ret;
18393 }
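
/*
 * Illustrative sketch only: a hypothetical caller probing with ascending
 * candidate counts, in the spirit of the generic
 * mlx5_flow_discover_priorities() wrapper; the vprio values are assumptions.
 *
 *	static const uint16_t vprio[] = { 8, 16 };
 *	int n;
 *
 *	n = flow_dv_discover_priorities(dev, vprio, RTE_DIM(vprio));
 *	if (n < 0)
 *		return n; // rte_errno already set by the callee
 *	// n is the largest candidate count the HW accepted.
 */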
18394
18395 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
18396         .validate = flow_dv_validate,
18397         .prepare = flow_dv_prepare,
18398         .translate = flow_dv_translate,
18399         .apply = flow_dv_apply,
18400         .remove = flow_dv_remove,
18401         .destroy = flow_dv_destroy,
18402         .query = flow_dv_query,
18403         .create_mtr_tbls = flow_dv_create_mtr_tbls,
18404         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
18405         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
18406         .create_meter = flow_dv_mtr_alloc,
18407         .free_meter = flow_dv_aso_mtr_release_to_pool,
18408         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
18409         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
18410         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
18411         .create_policy_rules = flow_dv_create_policy_rules,
18412         .destroy_policy_rules = flow_dv_destroy_policy_rules,
18413         .create_def_policy = flow_dv_create_def_policy,
18414         .destroy_def_policy = flow_dv_destroy_def_policy,
18415         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
18416         .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
18417         .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
18418         .counter_alloc = flow_dv_counter_allocate,
18419         .counter_free = flow_dv_counter_free,
18420         .counter_query = flow_dv_counter_query,
18421         .get_aged_flows = flow_dv_get_aged_flows,
18422         .action_validate = flow_dv_action_validate,
18423         .action_create = flow_dv_action_create,
18424         .action_destroy = flow_dv_action_destroy,
18425         .action_update = flow_dv_action_update,
18426         .action_query = flow_dv_action_query,
18427         .sync_domain = flow_dv_sync_domain,
18428         .discover_priorities = flow_dv_discover_priorities,
18429         .item_create = flow_dv_item_create,
18430         .item_release = flow_dv_item_release,
18431 };
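
/*
 * Illustrative sketch only: the generic flow layer invokes these callbacks
 * through a per-driver registry rather than calling the DV functions
 * directly, assuming the flow_drv_ops[] table indexed by mlx5_flow_drv_type:
 *
 *	const struct mlx5_flow_driver_ops *fops =
 *		flow_drv_ops[MLX5_FLOW_TYPE_DV];
 *
 *	ret = fops->discover_priorities(dev, vprio, vprio_n);
 */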
18432
18433 #endif /* defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) */