pipeline: add check against loops
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_bus_pci.h>
19 #include <rte_ip.h>
20 #include <rte_gre.h>
21 #include <rte_vxlan.h>
22 #include <rte_gtp.h>
23 #include <rte_eal_paging.h>
24 #include <rte_mpls.h>
25 #include <rte_mtr.h>
26 #include <rte_mtr_driver.h>
27 #include <rte_tailq.h>
28
29 #include <mlx5_glue.h>
30 #include <mlx5_devx_cmds.h>
31 #include <mlx5_prm.h>
32 #include <mlx5_malloc.h>
33
34 #include "mlx5_defs.h"
35 #include "mlx5.h"
36 #include "mlx5_common_os.h"
37 #include "mlx5_flow.h"
38 #include "mlx5_flow_os.h"
39 #include "mlx5_rx.h"
40 #include "mlx5_tx.h"
41 #include "rte_pmd_mlx5.h"
42
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

/* Fallback definitions when rdma-core lacks the corresponding symbols. */
#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

/* Without E-Switch support the FDB table type is unused; stub it to 0. */
#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
65
/*
 * Aggregated outer L3/L4 attributes of a flow, used to select the proper
 * protocol header tables for modify-header actions. The union overlays the
 * bit-fields with a plain integer so all flags can be reset at once
 * (attr->attr = 0).
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1; /* Set once the attributes were computed. */
		uint32_t ipv4:1;  /* Outer L3 is IPv4. */
		uint32_t ipv6:1;  /* Outer L3 is IPv6. */
		uint32_t tcp:1;   /* Outer L4 is TCP. */
		uint32_t udp:1;   /* Outer L4 is UDP. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* All flags as one word, for bulk reset. */
};
77
/*
 * Forward declarations for resource release helpers that are referenced
 * before their definitions later in this file.
 */
static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
				     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
				  uint32_t rix_jump);
96 static int16_t
97 flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
98 {
99         struct mlx5_priv *priv = dev->data->dev_private;
100
101         if (priv->pci_dev == NULL)
102                 return 0;
103         switch (priv->pci_dev->id.device_id) {
104         case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
105         case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
106         case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
107                 return (int16_t)0xfffe;
108         default:
109                 return 0;
110         }
111 }
112
113 /**
114  * Initialize flow attributes structure according to flow items' types.
115  *
116  * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
117  * mode. For tunnel mode, the items to be modified are the outermost ones.
118  *
119  * @param[in] item
120  *   Pointer to item specification.
121  * @param[out] attr
122  *   Pointer to flow attributes structure.
123  * @param[in] dev_flow
124  *   Pointer to the sub flow.
125  * @param[in] tunnel_decap
126  *   Whether action is after tunnel decapsulation.
127  */
128 static void
129 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
130                   struct mlx5_flow *dev_flow, bool tunnel_decap)
131 {
132         uint64_t layers = dev_flow->handle->layers;
133
134         /*
135          * If layers is already initialized, it means this dev_flow is the
136          * suffix flow, the layers flags is set by the prefix flow. Need to
137          * use the layer flags from prefix flow as the suffix flow may not
138          * have the user defined items as the flow is split.
139          */
140         if (layers) {
141                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
142                         attr->ipv4 = 1;
143                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
144                         attr->ipv6 = 1;
145                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
146                         attr->tcp = 1;
147                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
148                         attr->udp = 1;
149                 attr->valid = 1;
150                 return;
151         }
152         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
153                 uint8_t next_protocol = 0xff;
154                 switch (item->type) {
155                 case RTE_FLOW_ITEM_TYPE_GRE:
156                 case RTE_FLOW_ITEM_TYPE_NVGRE:
157                 case RTE_FLOW_ITEM_TYPE_VXLAN:
158                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
159                 case RTE_FLOW_ITEM_TYPE_GENEVE:
160                 case RTE_FLOW_ITEM_TYPE_MPLS:
161                         if (tunnel_decap)
162                                 attr->attr = 0;
163                         break;
164                 case RTE_FLOW_ITEM_TYPE_IPV4:
165                         if (!attr->ipv6)
166                                 attr->ipv4 = 1;
167                         if (item->mask != NULL &&
168                             ((const struct rte_flow_item_ipv4 *)
169                             item->mask)->hdr.next_proto_id)
170                                 next_protocol =
171                                     ((const struct rte_flow_item_ipv4 *)
172                                       (item->spec))->hdr.next_proto_id &
173                                     ((const struct rte_flow_item_ipv4 *)
174                                       (item->mask))->hdr.next_proto_id;
175                         if ((next_protocol == IPPROTO_IPIP ||
176                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
177                                 attr->attr = 0;
178                         break;
179                 case RTE_FLOW_ITEM_TYPE_IPV6:
180                         if (!attr->ipv4)
181                                 attr->ipv6 = 1;
182                         if (item->mask != NULL &&
183                             ((const struct rte_flow_item_ipv6 *)
184                             item->mask)->hdr.proto)
185                                 next_protocol =
186                                     ((const struct rte_flow_item_ipv6 *)
187                                       (item->spec))->hdr.proto &
188                                     ((const struct rte_flow_item_ipv6 *)
189                                       (item->mask))->hdr.proto;
190                         if ((next_protocol == IPPROTO_IPIP ||
191                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
192                                 attr->attr = 0;
193                         break;
194                 case RTE_FLOW_ITEM_TYPE_UDP:
195                         if (!attr->tcp)
196                                 attr->udp = 1;
197                         break;
198                 case RTE_FLOW_ITEM_TYPE_TCP:
199                         if (!attr->udp)
200                                 attr->tcp = 1;
201                         break;
202                 default:
203                         break;
204                 }
205         }
206         attr->valid = 1;
207 }
208
209 /*
210  * Convert rte_mtr_color to mlx5 color.
211  *
212  * @param[in] rcol
213  *   rte_mtr_color.
214  *
215  * @return
216  *   mlx5 color.
217  */
218 static inline int
219 rte_col_2_mlx5_col(enum rte_color rcol)
220 {
221         switch (rcol) {
222         case RTE_COLOR_GREEN:
223                 return MLX5_FLOW_COLOR_GREEN;
224         case RTE_COLOR_YELLOW:
225                 return MLX5_FLOW_COLOR_YELLOW;
226         case RTE_COLOR_RED:
227                 return MLX5_FLOW_COLOR_RED;
228         default:
229                 break;
230         }
231         return MLX5_FLOW_COLOR_UNDEFINED;
232 }
233
/* Describes one protocol-header field targeted by a modify-header command. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* Device modification field id. */
};
/*
 * Field tables below map protocol header layouts to device modification
 * ids. Each table is terminated by an all-zero entry (size == 0).
 * Offsets/sizes are in bytes unless a table notes otherwise.
 */

/* Ethernet: destination then source MAC, split 48-bit into 32+16. */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

/* IPv4: DSCP, TTL, source and destination addresses. */
struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

/* IPv6: DSCP, hop limit, 128-bit addresses split into 32-bit words. */
struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

/* UDP: source and destination ports. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

/* TCP: ports plus sequence and acknowledgment numbers. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};
289
290 static void
291 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
292                           uint8_t next_protocol, uint64_t *item_flags,
293                           int *tunnel)
294 {
295         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
296                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
297         if (next_protocol == IPPROTO_IPIP) {
298                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
299                 *tunnel = 1;
300         }
301         if (next_protocol == IPPROTO_IPV6) {
302                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
303                 *tunnel = 1;
304         }
305 }
306
/*
 * Lazily create a shared hash list, racing safely with other threads.
 *
 * Lock-free double-checked initialization: fast path loads *phl; on the
 * slow path a new hlist is created and published with a compare-exchange.
 * If another thread won the race, the local copy is destroyed and the
 * winner's list is returned.
 *
 * @return
 *   The (possibly pre-existing) hash list, or NULL with rte_errno set.
 */
static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
		     const char *name, uint32_t size, bool direct_key,
		     bool lcores_share, void *ctx,
		     mlx5_list_create_cb cb_create,
		     mlx5_list_match_cb cb_match,
		     mlx5_list_remove_cb cb_remove,
		     mlx5_list_clone_cb cb_clone,
		     mlx5_list_clone_free_cb cb_clone_free)
{
	struct mlx5_hlist *hl;
	struct mlx5_hlist *expected = NULL;
	char s[MLX5_NAME_SIZE];

	/* Fast path: list already published by this or another thread. */
	hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
	if (likely(hl))
		return hl;
	snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
	hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
			ctx, cb_create, cb_match, cb_remove, cb_clone,
			cb_clone_free);
	if (!hl) {
		DRV_LOG(ERR, "%s hash creation failed", name);
		rte_errno = ENOMEM;
		return NULL;
	}
	/* Publish; on CAS failure another thread won - use its list. */
	if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
					 __ATOMIC_SEQ_CST,
					 __ATOMIC_SEQ_CST)) {
		mlx5_hlist_destroy(hl);
		hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
	}
	return hl;
}
341
342 /* Update VLAN's VID/PCP based on input rte_flow_action.
343  *
344  * @param[in] action
345  *   Pointer to struct rte_flow_action.
346  * @param[out] vlan
347  *   Pointer to struct rte_vlan_hdr.
348  */
349 static void
350 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
351                          struct rte_vlan_hdr *vlan)
352 {
353         uint16_t vlan_tci;
354         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
355                 vlan_tci =
356                     ((const struct rte_flow_action_of_set_vlan_pcp *)
357                                                action->conf)->vlan_pcp;
358                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
359                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
360                 vlan->vlan_tci |= vlan_tci;
361         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
362                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
363                 vlan->vlan_tci |= rte_be_to_cpu_16
364                     (((const struct rte_flow_action_of_set_vlan_vid *)
365                                              action->conf)->vlan_vid);
366         }
367 }
368
369 /**
370  * Fetch 1, 2, 3 or 4 byte field from the byte array
371  * and return as unsigned integer in host-endian format.
372  *
373  * @param[in] data
374  *   Pointer to data array.
375  * @param[in] size
376  *   Size of field to extract.
377  *
378  * @return
379  *   converted field in host endian format.
380  */
381 static inline uint32_t
382 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
383 {
384         uint32_t ret;
385
386         switch (size) {
387         case 1:
388                 ret = *data;
389                 break;
390         case 2:
391                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
392                 break;
393         case 3:
394                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
395                 ret = (ret << 8) | *(data + sizeof(uint16_t));
396                 break;
397         case 4:
398                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
399                 break;
400         default:
401                 MLX5_ASSERT(false);
402                 ret = 0;
403                 break;
404         }
405         return ret;
406 }
407
408 /**
409  * Convert modify-header action to DV specification.
410  *
411  * Data length of each action is determined by provided field description
412  * and the item mask. Data bit offset and width of each action is determined
413  * by provided item mask.
414  *
415  * @param[in] item
416  *   Pointer to item specification.
417  * @param[in] field
418  *   Pointer to field modification information.
419  *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
420  *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
421  *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
422  * @param[in] dcopy
423  *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
424  *   Negative offset value sets the same offset as source offset.
425  *   size field is ignored, value is taken from source field.
426  * @param[in,out] resource
427  *   Pointer to the modify-header resource.
428  * @param[in] type
429  *   Type of modification.
430  * @param[out] error
431  *   Pointer to the error structure.
432  *
433  * @return
434  *   0 on success, a negative errno value otherwise and rte_errno is set.
435  */
436 static int
437 flow_dv_convert_modify_action(struct rte_flow_item *item,
438                               struct field_modify_info *field,
439                               struct field_modify_info *dcopy,
440                               struct mlx5_flow_dv_modify_hdr_resource *resource,
441                               uint32_t type, struct rte_flow_error *error)
442 {
443         uint32_t i = resource->actions_num;
444         struct mlx5_modification_cmd *actions = resource->actions;
445         uint32_t carry_b = 0;
446
447         /*
448          * The item and mask are provided in big-endian format.
449          * The fields should be presented as in big-endian format either.
450          * Mask must be always present, it defines the actual field width.
451          */
452         MLX5_ASSERT(item->mask);
453         MLX5_ASSERT(field->size);
454         do {
455                 uint32_t size_b;
456                 uint32_t off_b;
457                 uint32_t mask;
458                 uint32_t data;
459                 bool next_field = true;
460                 bool next_dcopy = true;
461
462                 if (i >= MLX5_MAX_MODIFY_NUM)
463                         return rte_flow_error_set(error, EINVAL,
464                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
465                                  "too many items to modify");
466                 /* Fetch variable byte size mask from the array. */
467                 mask = flow_dv_fetch_field((const uint8_t *)item->mask +
468                                            field->offset, field->size);
469                 if (!mask) {
470                         ++field;
471                         continue;
472                 }
473                 /* Deduce actual data width in bits from mask value. */
474                 off_b = rte_bsf32(mask) + carry_b;
475                 size_b = sizeof(uint32_t) * CHAR_BIT -
476                          off_b - __builtin_clz(mask);
477                 MLX5_ASSERT(size_b);
478                 actions[i] = (struct mlx5_modification_cmd) {
479                         .action_type = type,
480                         .field = field->id,
481                         .offset = off_b,
482                         .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
483                                 0 : size_b,
484                 };
485                 if (type == MLX5_MODIFICATION_TYPE_COPY) {
486                         MLX5_ASSERT(dcopy);
487                         actions[i].dst_field = dcopy->id;
488                         actions[i].dst_offset =
489                                 (int)dcopy->offset < 0 ? off_b : dcopy->offset;
490                         /* Convert entire record to big-endian format. */
491                         actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
492                         /*
493                          * Destination field overflow. Copy leftovers of
494                          * a source field to the next destination field.
495                          */
496                         carry_b = 0;
497                         if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
498                             dcopy->size != 0) {
499                                 actions[i].length =
500                                         dcopy->size * CHAR_BIT - dcopy->offset;
501                                 carry_b = actions[i].length;
502                                 next_field = false;
503                         }
504                         /*
505                          * Not enough bits in a source filed to fill a
506                          * destination field. Switch to the next source.
507                          */
508                         if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
509                             (size_b == field->size * CHAR_BIT - off_b)) {
510                                 actions[i].length =
511                                         field->size * CHAR_BIT - off_b;
512                                 dcopy->offset += actions[i].length;
513                                 next_dcopy = false;
514                         }
515                         if (next_dcopy)
516                                 ++dcopy;
517                 } else {
518                         MLX5_ASSERT(item->spec);
519                         data = flow_dv_fetch_field((const uint8_t *)item->spec +
520                                                    field->offset, field->size);
521                         /* Shift out the trailing masked bits from data. */
522                         data = (data & mask) >> off_b;
523                         actions[i].data1 = rte_cpu_to_be_32(data);
524                 }
525                 /* Convert entire record to expected big-endian format. */
526                 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
527                 if (next_field)
528                         ++field;
529                 ++i;
530         } while (field->size);
531         if (resource->actions_num == i)
532                 return rte_flow_error_set(error, EINVAL,
533                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
534                                           "invalid modification flow item");
535         resource->actions_num = i;
536         return 0;
537 }
538
539 /**
540  * Convert modify-header set IPv4 address action to DV specification.
541  *
542  * @param[in,out] resource
543  *   Pointer to the modify-header resource.
544  * @param[in] action
545  *   Pointer to action specification.
546  * @param[out] error
547  *   Pointer to the error structure.
548  *
549  * @return
550  *   0 on success, a negative errno value otherwise and rte_errno is set.
551  */
552 static int
553 flow_dv_convert_action_modify_ipv4
554                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
555                          const struct rte_flow_action *action,
556                          struct rte_flow_error *error)
557 {
558         const struct rte_flow_action_set_ipv4 *conf =
559                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
560         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
561         struct rte_flow_item_ipv4 ipv4;
562         struct rte_flow_item_ipv4 ipv4_mask;
563
564         memset(&ipv4, 0, sizeof(ipv4));
565         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
566         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
567                 ipv4.hdr.src_addr = conf->ipv4_addr;
568                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
569         } else {
570                 ipv4.hdr.dst_addr = conf->ipv4_addr;
571                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
572         }
573         item.spec = &ipv4;
574         item.mask = &ipv4_mask;
575         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
576                                              MLX5_MODIFICATION_TYPE_SET, error);
577 }
578
579 /**
580  * Convert modify-header set IPv6 address action to DV specification.
581  *
582  * @param[in,out] resource
583  *   Pointer to the modify-header resource.
584  * @param[in] action
585  *   Pointer to action specification.
586  * @param[out] error
587  *   Pointer to the error structure.
588  *
589  * @return
590  *   0 on success, a negative errno value otherwise and rte_errno is set.
591  */
592 static int
593 flow_dv_convert_action_modify_ipv6
594                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
595                          const struct rte_flow_action *action,
596                          struct rte_flow_error *error)
597 {
598         const struct rte_flow_action_set_ipv6 *conf =
599                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
600         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
601         struct rte_flow_item_ipv6 ipv6;
602         struct rte_flow_item_ipv6 ipv6_mask;
603
604         memset(&ipv6, 0, sizeof(ipv6));
605         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
606         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
607                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
608                        sizeof(ipv6.hdr.src_addr));
609                 memcpy(&ipv6_mask.hdr.src_addr,
610                        &rte_flow_item_ipv6_mask.hdr.src_addr,
611                        sizeof(ipv6.hdr.src_addr));
612         } else {
613                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
614                        sizeof(ipv6.hdr.dst_addr));
615                 memcpy(&ipv6_mask.hdr.dst_addr,
616                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
617                        sizeof(ipv6.hdr.dst_addr));
618         }
619         item.spec = &ipv6;
620         item.mask = &ipv6_mask;
621         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
622                                              MLX5_MODIFICATION_TYPE_SET, error);
623 }
624
625 /**
626  * Convert modify-header set MAC address action to DV specification.
627  *
628  * @param[in,out] resource
629  *   Pointer to the modify-header resource.
630  * @param[in] action
631  *   Pointer to action specification.
632  * @param[out] error
633  *   Pointer to the error structure.
634  *
635  * @return
636  *   0 on success, a negative errno value otherwise and rte_errno is set.
637  */
638 static int
639 flow_dv_convert_action_modify_mac
640                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
641                          const struct rte_flow_action *action,
642                          struct rte_flow_error *error)
643 {
644         const struct rte_flow_action_set_mac *conf =
645                 (const struct rte_flow_action_set_mac *)(action->conf);
646         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
647         struct rte_flow_item_eth eth;
648         struct rte_flow_item_eth eth_mask;
649
650         memset(&eth, 0, sizeof(eth));
651         memset(&eth_mask, 0, sizeof(eth_mask));
652         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
653                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
654                        sizeof(eth.src.addr_bytes));
655                 memcpy(&eth_mask.src.addr_bytes,
656                        &rte_flow_item_eth_mask.src.addr_bytes,
657                        sizeof(eth_mask.src.addr_bytes));
658         } else {
659                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
660                        sizeof(eth.dst.addr_bytes));
661                 memcpy(&eth_mask.dst.addr_bytes,
662                        &rte_flow_item_eth_mask.dst.addr_bytes,
663                        sizeof(eth_mask.dst.addr_bytes));
664         }
665         item.spec = &eth;
666         item.mask = &eth_mask;
667         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
668                                              MLX5_MODIFICATION_TYPE_SET, error);
669 }
670
671 /**
672  * Convert modify-header set VLAN VID action to DV specification.
673  *
674  * @param[in,out] resource
675  *   Pointer to the modify-header resource.
676  * @param[in] action
677  *   Pointer to action specification.
678  * @param[out] error
679  *   Pointer to the error structure.
680  *
681  * @return
682  *   0 on success, a negative errno value otherwise and rte_errno is set.
683  */
684 static int
685 flow_dv_convert_action_modify_vlan_vid
686                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
687                          const struct rte_flow_action *action,
688                          struct rte_flow_error *error)
689 {
690         const struct rte_flow_action_of_set_vlan_vid *conf =
691                 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
692         int i = resource->actions_num;
693         struct mlx5_modification_cmd *actions = resource->actions;
694         struct field_modify_info *field = modify_vlan_out_first_vid;
695
696         if (i >= MLX5_MAX_MODIFY_NUM)
697                 return rte_flow_error_set(error, EINVAL,
698                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
699                          "too many items to modify");
700         actions[i] = (struct mlx5_modification_cmd) {
701                 .action_type = MLX5_MODIFICATION_TYPE_SET,
702                 .field = field->id,
703                 .length = field->size,
704                 .offset = field->offset,
705         };
706         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
707         actions[i].data1 = conf->vlan_vid;
708         actions[i].data1 = actions[i].data1 << 16;
709         resource->actions_num = ++i;
710         return 0;
711 }
712
713 /**
714  * Convert modify-header set TP action to DV specification.
715  *
716  * @param[in,out] resource
717  *   Pointer to the modify-header resource.
718  * @param[in] action
719  *   Pointer to action specification.
720  * @param[in] items
721  *   Pointer to rte_flow_item objects list.
722  * @param[in] attr
723  *   Pointer to flow attributes structure.
724  * @param[in] dev_flow
725  *   Pointer to the sub flow.
726  * @param[in] tunnel_decap
727  *   Whether action is after tunnel decapsulation.
728  * @param[out] error
729  *   Pointer to the error structure.
730  *
731  * @return
732  *   0 on success, a negative errno value otherwise and rte_errno is set.
733  */
734 static int
735 flow_dv_convert_action_modify_tp
736                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
737                          const struct rte_flow_action *action,
738                          const struct rte_flow_item *items,
739                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
740                          bool tunnel_decap, struct rte_flow_error *error)
741 {
742         const struct rte_flow_action_set_tp *conf =
743                 (const struct rte_flow_action_set_tp *)(action->conf);
744         struct rte_flow_item item;
745         struct rte_flow_item_udp udp;
746         struct rte_flow_item_udp udp_mask;
747         struct rte_flow_item_tcp tcp;
748         struct rte_flow_item_tcp tcp_mask;
749         struct field_modify_info *field;
750
751         if (!attr->valid)
752                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
753         if (attr->udp) {
754                 memset(&udp, 0, sizeof(udp));
755                 memset(&udp_mask, 0, sizeof(udp_mask));
756                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
757                         udp.hdr.src_port = conf->port;
758                         udp_mask.hdr.src_port =
759                                         rte_flow_item_udp_mask.hdr.src_port;
760                 } else {
761                         udp.hdr.dst_port = conf->port;
762                         udp_mask.hdr.dst_port =
763                                         rte_flow_item_udp_mask.hdr.dst_port;
764                 }
765                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
766                 item.spec = &udp;
767                 item.mask = &udp_mask;
768                 field = modify_udp;
769         } else {
770                 MLX5_ASSERT(attr->tcp);
771                 memset(&tcp, 0, sizeof(tcp));
772                 memset(&tcp_mask, 0, sizeof(tcp_mask));
773                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
774                         tcp.hdr.src_port = conf->port;
775                         tcp_mask.hdr.src_port =
776                                         rte_flow_item_tcp_mask.hdr.src_port;
777                 } else {
778                         tcp.hdr.dst_port = conf->port;
779                         tcp_mask.hdr.dst_port =
780                                         rte_flow_item_tcp_mask.hdr.dst_port;
781                 }
782                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
783                 item.spec = &tcp;
784                 item.mask = &tcp_mask;
785                 field = modify_tcp;
786         }
787         return flow_dv_convert_modify_action(&item, field, NULL, resource,
788                                              MLX5_MODIFICATION_TYPE_SET, error);
789 }
790
791 /**
792  * Convert modify-header set TTL action to DV specification.
793  *
794  * @param[in,out] resource
795  *   Pointer to the modify-header resource.
796  * @param[in] action
797  *   Pointer to action specification.
798  * @param[in] items
799  *   Pointer to rte_flow_item objects list.
800  * @param[in] attr
801  *   Pointer to flow attributes structure.
802  * @param[in] dev_flow
803  *   Pointer to the sub flow.
804  * @param[in] tunnel_decap
805  *   Whether action is after tunnel decapsulation.
806  * @param[out] error
807  *   Pointer to the error structure.
808  *
809  * @return
810  *   0 on success, a negative errno value otherwise and rte_errno is set.
811  */
812 static int
813 flow_dv_convert_action_modify_ttl
814                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
815                          const struct rte_flow_action *action,
816                          const struct rte_flow_item *items,
817                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
818                          bool tunnel_decap, struct rte_flow_error *error)
819 {
820         const struct rte_flow_action_set_ttl *conf =
821                 (const struct rte_flow_action_set_ttl *)(action->conf);
822         struct rte_flow_item item;
823         struct rte_flow_item_ipv4 ipv4;
824         struct rte_flow_item_ipv4 ipv4_mask;
825         struct rte_flow_item_ipv6 ipv6;
826         struct rte_flow_item_ipv6 ipv6_mask;
827         struct field_modify_info *field;
828
829         if (!attr->valid)
830                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
831         if (attr->ipv4) {
832                 memset(&ipv4, 0, sizeof(ipv4));
833                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
834                 ipv4.hdr.time_to_live = conf->ttl_value;
835                 ipv4_mask.hdr.time_to_live = 0xFF;
836                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
837                 item.spec = &ipv4;
838                 item.mask = &ipv4_mask;
839                 field = modify_ipv4;
840         } else {
841                 MLX5_ASSERT(attr->ipv6);
842                 memset(&ipv6, 0, sizeof(ipv6));
843                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
844                 ipv6.hdr.hop_limits = conf->ttl_value;
845                 ipv6_mask.hdr.hop_limits = 0xFF;
846                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
847                 item.spec = &ipv6;
848                 item.mask = &ipv6_mask;
849                 field = modify_ipv6;
850         }
851         return flow_dv_convert_modify_action(&item, field, NULL, resource,
852                                              MLX5_MODIFICATION_TYPE_SET, error);
853 }
854
855 /**
856  * Convert modify-header decrement TTL action to DV specification.
857  *
858  * @param[in,out] resource
859  *   Pointer to the modify-header resource.
860  * @param[in] action
861  *   Pointer to action specification.
862  * @param[in] items
863  *   Pointer to rte_flow_item objects list.
864  * @param[in] attr
865  *   Pointer to flow attributes structure.
866  * @param[in] dev_flow
867  *   Pointer to the sub flow.
868  * @param[in] tunnel_decap
869  *   Whether action is after tunnel decapsulation.
870  * @param[out] error
871  *   Pointer to the error structure.
872  *
873  * @return
874  *   0 on success, a negative errno value otherwise and rte_errno is set.
875  */
876 static int
877 flow_dv_convert_action_modify_dec_ttl
878                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
879                          const struct rte_flow_item *items,
880                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
881                          bool tunnel_decap, struct rte_flow_error *error)
882 {
883         struct rte_flow_item item;
884         struct rte_flow_item_ipv4 ipv4;
885         struct rte_flow_item_ipv4 ipv4_mask;
886         struct rte_flow_item_ipv6 ipv6;
887         struct rte_flow_item_ipv6 ipv6_mask;
888         struct field_modify_info *field;
889
890         if (!attr->valid)
891                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
892         if (attr->ipv4) {
893                 memset(&ipv4, 0, sizeof(ipv4));
894                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
895                 ipv4.hdr.time_to_live = 0xFF;
896                 ipv4_mask.hdr.time_to_live = 0xFF;
897                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
898                 item.spec = &ipv4;
899                 item.mask = &ipv4_mask;
900                 field = modify_ipv4;
901         } else {
902                 MLX5_ASSERT(attr->ipv6);
903                 memset(&ipv6, 0, sizeof(ipv6));
904                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
905                 ipv6.hdr.hop_limits = 0xFF;
906                 ipv6_mask.hdr.hop_limits = 0xFF;
907                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
908                 item.spec = &ipv6;
909                 item.mask = &ipv6_mask;
910                 field = modify_ipv6;
911         }
912         return flow_dv_convert_modify_action(&item, field, NULL, resource,
913                                              MLX5_MODIFICATION_TYPE_ADD, error);
914 }
915
916 /**
917  * Convert modify-header increment/decrement TCP Sequence number
918  * to DV specification.
919  *
920  * @param[in,out] resource
921  *   Pointer to the modify-header resource.
922  * @param[in] action
923  *   Pointer to action specification.
924  * @param[out] error
925  *   Pointer to the error structure.
926  *
927  * @return
928  *   0 on success, a negative errno value otherwise and rte_errno is set.
929  */
930 static int
931 flow_dv_convert_action_modify_tcp_seq
932                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
933                          const struct rte_flow_action *action,
934                          struct rte_flow_error *error)
935 {
936         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
937         uint64_t value = rte_be_to_cpu_32(*conf);
938         struct rte_flow_item item;
939         struct rte_flow_item_tcp tcp;
940         struct rte_flow_item_tcp tcp_mask;
941
942         memset(&tcp, 0, sizeof(tcp));
943         memset(&tcp_mask, 0, sizeof(tcp_mask));
944         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
945                 /*
946                  * The HW has no decrement operation, only increment operation.
947                  * To simulate decrement X from Y using increment operation
948                  * we need to add UINT32_MAX X times to Y.
949                  * Each adding of UINT32_MAX decrements Y by 1.
950                  */
951                 value *= UINT32_MAX;
952         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
953         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
954         item.type = RTE_FLOW_ITEM_TYPE_TCP;
955         item.spec = &tcp;
956         item.mask = &tcp_mask;
957         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
958                                              MLX5_MODIFICATION_TYPE_ADD, error);
959 }
960
961 /**
962  * Convert modify-header increment/decrement TCP Acknowledgment number
963  * to DV specification.
964  *
965  * @param[in,out] resource
966  *   Pointer to the modify-header resource.
967  * @param[in] action
968  *   Pointer to action specification.
969  * @param[out] error
970  *   Pointer to the error structure.
971  *
972  * @return
973  *   0 on success, a negative errno value otherwise and rte_errno is set.
974  */
975 static int
976 flow_dv_convert_action_modify_tcp_ack
977                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
978                          const struct rte_flow_action *action,
979                          struct rte_flow_error *error)
980 {
981         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
982         uint64_t value = rte_be_to_cpu_32(*conf);
983         struct rte_flow_item item;
984         struct rte_flow_item_tcp tcp;
985         struct rte_flow_item_tcp tcp_mask;
986
987         memset(&tcp, 0, sizeof(tcp));
988         memset(&tcp_mask, 0, sizeof(tcp_mask));
989         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
990                 /*
991                  * The HW has no decrement operation, only increment operation.
992                  * To simulate decrement X from Y using increment operation
993                  * we need to add UINT32_MAX X times to Y.
994                  * Each adding of UINT32_MAX decrements Y by 1.
995                  */
996                 value *= UINT32_MAX;
997         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
998         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
999         item.type = RTE_FLOW_ITEM_TYPE_TCP;
1000         item.spec = &tcp;
1001         item.mask = &tcp_mask;
1002         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
1003                                              MLX5_MODIFICATION_TYPE_ADD, error);
1004 }
1005
/*
 * Map software metadata registers (enum modify_reg) to the matching
 * modify-header field identifiers used in device commands.
 */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
1019
1020 /**
1021  * Convert register set to DV specification.
1022  *
1023  * @param[in,out] resource
1024  *   Pointer to the modify-header resource.
1025  * @param[in] action
1026  *   Pointer to action specification.
1027  * @param[out] error
1028  *   Pointer to the error structure.
1029  *
1030  * @return
1031  *   0 on success, a negative errno value otherwise and rte_errno is set.
1032  */
1033 static int
1034 flow_dv_convert_action_set_reg
1035                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1036                          const struct rte_flow_action *action,
1037                          struct rte_flow_error *error)
1038 {
1039         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
1040         struct mlx5_modification_cmd *actions = resource->actions;
1041         uint32_t i = resource->actions_num;
1042
1043         if (i >= MLX5_MAX_MODIFY_NUM)
1044                 return rte_flow_error_set(error, EINVAL,
1045                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1046                                           "too many items to modify");
1047         MLX5_ASSERT(conf->id != REG_NON);
1048         MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
1049         actions[i] = (struct mlx5_modification_cmd) {
1050                 .action_type = MLX5_MODIFICATION_TYPE_SET,
1051                 .field = reg_to_field[conf->id],
1052                 .offset = conf->offset,
1053                 .length = conf->length,
1054         };
1055         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1056         actions[i].data1 = rte_cpu_to_be_32(conf->data);
1057         ++i;
1058         resource->actions_num = i;
1059         return 0;
1060 }
1061
1062 /**
1063  * Convert SET_TAG action to DV specification.
1064  *
1065  * @param[in] dev
1066  *   Pointer to the rte_eth_dev structure.
1067  * @param[in,out] resource
1068  *   Pointer to the modify-header resource.
1069  * @param[in] conf
1070  *   Pointer to action specification.
1071  * @param[out] error
1072  *   Pointer to the error structure.
1073  *
1074  * @return
1075  *   0 on success, a negative errno value otherwise and rte_errno is set.
1076  */
1077 static int
1078 flow_dv_convert_action_set_tag
1079                         (struct rte_eth_dev *dev,
1080                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1081                          const struct rte_flow_action_set_tag *conf,
1082                          struct rte_flow_error *error)
1083 {
1084         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1085         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1086         struct rte_flow_item item = {
1087                 .spec = &data,
1088                 .mask = &mask,
1089         };
1090         struct field_modify_info reg_c_x[] = {
1091                 [1] = {0, 0, 0},
1092         };
1093         enum mlx5_modification_field reg_type;
1094         int ret;
1095
1096         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1097         if (ret < 0)
1098                 return ret;
1099         MLX5_ASSERT(ret != REG_NON);
1100         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1101         reg_type = reg_to_field[ret];
1102         MLX5_ASSERT(reg_type > 0);
1103         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1104         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1105                                              MLX5_MODIFICATION_TYPE_SET, error);
1106 }
1107
/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	/* By default copy the full 32-bit register. */
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	/* Single source register entry followed by a zero terminator. */
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		/* reg_c[0] is shared HW resource; only masked bits usable. */
		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
		} else {
			/* Copy from reg_c[0]: restrict to the usable bits. */
			reg_dst.offset = 0;
			mask = rte_cpu_to_be_32(reg_c0);
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
1164
/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Restrict the mark to the bits supported by the device. */
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	/* One register entry plus a zero terminator. */
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		/*
		 * Shift value and mask into reg_c[0]'s usable bit range.
		 * Values are kept big-endian, hence the swap/shift/swap.
		 */
		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1221
1222 /**
1223  * Get metadata register index for specified steering domain.
1224  *
1225  * @param[in] dev
1226  *   Pointer to the rte_eth_dev structure.
1227  * @param[in] attr
1228  *   Attributes of flow to determine steering domain.
1229  * @param[out] error
1230  *   Pointer to the error structure.
1231  *
1232  * @return
1233  *   positive index on success, a negative errno value otherwise
1234  *   and rte_errno is set.
1235  */
1236 static enum modify_reg
1237 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1238                          const struct rte_flow_attr *attr,
1239                          struct rte_flow_error *error)
1240 {
1241         int reg =
1242                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1243                                           MLX5_METADATA_FDB :
1244                                             attr->egress ?
1245                                             MLX5_METADATA_TX :
1246                                             MLX5_METADATA_RX, 0, error);
1247         if (reg < 0)
1248                 return rte_flow_error_set(error,
1249                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1250                                           NULL, "unavailable "
1251                                           "metadata register");
1252         return reg;
1253 }
1254
/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t mask = rte_cpu_to_be_32(conf->mask);
	uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	/* One register entry plus a zero terminator. */
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		/*
		 * Shift value and mask into reg_c[0]'s usable bit range.
		 * Values are kept big-endian, hence the swap/shift/swap.
		 */
		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1308
1309 /**
1310  * Convert modify-header set IPv4 DSCP action to DV specification.
1311  *
1312  * @param[in,out] resource
1313  *   Pointer to the modify-header resource.
1314  * @param[in] action
1315  *   Pointer to action specification.
1316  * @param[out] error
1317  *   Pointer to the error structure.
1318  *
1319  * @return
1320  *   0 on success, a negative errno value otherwise and rte_errno is set.
1321  */
1322 static int
1323 flow_dv_convert_action_modify_ipv4_dscp
1324                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1325                          const struct rte_flow_action *action,
1326                          struct rte_flow_error *error)
1327 {
1328         const struct rte_flow_action_set_dscp *conf =
1329                 (const struct rte_flow_action_set_dscp *)(action->conf);
1330         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1331         struct rte_flow_item_ipv4 ipv4;
1332         struct rte_flow_item_ipv4 ipv4_mask;
1333
1334         memset(&ipv4, 0, sizeof(ipv4));
1335         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1336         ipv4.hdr.type_of_service = conf->dscp;
1337         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1338         item.spec = &ipv4;
1339         item.mask = &ipv4_mask;
1340         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1341                                              MLX5_MODIFICATION_TYPE_SET, error);
1342 }
1343
1344 /**
1345  * Convert modify-header set IPv6 DSCP action to DV specification.
1346  *
1347  * @param[in,out] resource
1348  *   Pointer to the modify-header resource.
1349  * @param[in] action
1350  *   Pointer to action specification.
1351  * @param[out] error
1352  *   Pointer to the error structure.
1353  *
1354  * @return
1355  *   0 on success, a negative errno value otherwise and rte_errno is set.
1356  */
1357 static int
1358 flow_dv_convert_action_modify_ipv6_dscp
1359                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1360                          const struct rte_flow_action *action,
1361                          struct rte_flow_error *error)
1362 {
1363         const struct rte_flow_action_set_dscp *conf =
1364                 (const struct rte_flow_action_set_dscp *)(action->conf);
1365         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1366         struct rte_flow_item_ipv6 ipv6;
1367         struct rte_flow_item_ipv6 ipv6_mask;
1368
1369         memset(&ipv6, 0, sizeof(ipv6));
1370         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1371         /*
1372          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1373          * rdma-core only accept the DSCP bits byte aligned start from
1374          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1375          * bits in IPv6 case as rdma-core requires byte aligned value.
1376          */
1377         ipv6.hdr.vtc_flow = conf->dscp;
1378         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1379         item.spec = &ipv6;
1380         item.mask = &ipv6_mask;
1381         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1382                                              MLX5_MODIFICATION_TYPE_SET, error);
1383 }
1384
1385 static int
1386 mlx5_flow_item_field_width(struct rte_eth_dev *dev,
1387                            enum rte_flow_field_id field, int inherit,
1388                            const struct rte_flow_attr *attr,
1389                            struct rte_flow_error *error)
1390 {
1391         struct mlx5_priv *priv = dev->data->dev_private;
1392
1393         switch (field) {
1394         case RTE_FLOW_FIELD_START:
1395                 return 32;
1396         case RTE_FLOW_FIELD_MAC_DST:
1397         case RTE_FLOW_FIELD_MAC_SRC:
1398                 return 48;
1399         case RTE_FLOW_FIELD_VLAN_TYPE:
1400                 return 16;
1401         case RTE_FLOW_FIELD_VLAN_ID:
1402                 return 12;
1403         case RTE_FLOW_FIELD_MAC_TYPE:
1404                 return 16;
1405         case RTE_FLOW_FIELD_IPV4_DSCP:
1406                 return 6;
1407         case RTE_FLOW_FIELD_IPV4_TTL:
1408                 return 8;
1409         case RTE_FLOW_FIELD_IPV4_SRC:
1410         case RTE_FLOW_FIELD_IPV4_DST:
1411                 return 32;
1412         case RTE_FLOW_FIELD_IPV6_DSCP:
1413                 return 6;
1414         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1415                 return 8;
1416         case RTE_FLOW_FIELD_IPV6_SRC:
1417         case RTE_FLOW_FIELD_IPV6_DST:
1418                 return 128;
1419         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1420         case RTE_FLOW_FIELD_TCP_PORT_DST:
1421                 return 16;
1422         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1423         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1424                 return 32;
1425         case RTE_FLOW_FIELD_TCP_FLAGS:
1426                 return 9;
1427         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1428         case RTE_FLOW_FIELD_UDP_PORT_DST:
1429                 return 16;
1430         case RTE_FLOW_FIELD_VXLAN_VNI:
1431         case RTE_FLOW_FIELD_GENEVE_VNI:
1432                 return 24;
1433         case RTE_FLOW_FIELD_GTP_TEID:
1434         case RTE_FLOW_FIELD_TAG:
1435                 return 32;
1436         case RTE_FLOW_FIELD_MARK:
1437                 return __builtin_popcount(priv->sh->dv_mark_mask);
1438         case RTE_FLOW_FIELD_META:
1439                 return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
1440                         __builtin_popcount(priv->sh->dv_meta_mask) : 32;
1441         case RTE_FLOW_FIELD_POINTER:
1442         case RTE_FLOW_FIELD_VALUE:
1443                 return inherit < 0 ? 0 : inherit;
1444         default:
1445                 MLX5_ASSERT(false);
1446         }
1447         return 0;
1448 }
1449
1450 static void
1451 mlx5_flow_field_id_to_modify_info
1452                 (const struct rte_flow_action_modify_data *data,
1453                  struct field_modify_info *info, uint32_t *mask,
1454                  uint32_t width, struct rte_eth_dev *dev,
1455                  const struct rte_flow_attr *attr, struct rte_flow_error *error)
1456 {
1457         struct mlx5_priv *priv = dev->data->dev_private;
1458         uint32_t idx = 0;
1459         uint32_t off = 0;
1460
1461         switch (data->field) {
1462         case RTE_FLOW_FIELD_START:
1463                 /* not supported yet */
1464                 MLX5_ASSERT(false);
1465                 break;
1466         case RTE_FLOW_FIELD_MAC_DST:
1467                 off = data->offset > 16 ? data->offset - 16 : 0;
1468                 if (mask) {
1469                         if (data->offset < 16) {
1470                                 info[idx] = (struct field_modify_info){2, 4,
1471                                                 MLX5_MODI_OUT_DMAC_15_0};
1472                                 if (width < 16) {
1473                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1474                                                                  (16 - width));
1475                                         width = 0;
1476                                 } else {
1477                                         mask[1] = RTE_BE16(0xffff);
1478                                         width -= 16;
1479                                 }
1480                                 if (!width)
1481                                         break;
1482                                 ++idx;
1483                         }
1484                         info[idx] = (struct field_modify_info){4, 0,
1485                                                 MLX5_MODI_OUT_DMAC_47_16};
1486                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1487                                                     (32 - width)) << off);
1488                 } else {
1489                         if (data->offset < 16)
1490                                 info[idx++] = (struct field_modify_info){2, 0,
1491                                                 MLX5_MODI_OUT_DMAC_15_0};
1492                         info[idx] = (struct field_modify_info){4, off,
1493                                                 MLX5_MODI_OUT_DMAC_47_16};
1494                 }
1495                 break;
1496         case RTE_FLOW_FIELD_MAC_SRC:
1497                 off = data->offset > 16 ? data->offset - 16 : 0;
1498                 if (mask) {
1499                         if (data->offset < 16) {
1500                                 info[idx] = (struct field_modify_info){2, 4,
1501                                                 MLX5_MODI_OUT_SMAC_15_0};
1502                                 if (width < 16) {
1503                                         mask[1] = rte_cpu_to_be_16(0xffff >>
1504                                                                  (16 - width));
1505                                         width = 0;
1506                                 } else {
1507                                         mask[1] = RTE_BE16(0xffff);
1508                                         width -= 16;
1509                                 }
1510                                 if (!width)
1511                                         break;
1512                                 ++idx;
1513                         }
1514                         info[idx] = (struct field_modify_info){4, 0,
1515                                                 MLX5_MODI_OUT_SMAC_47_16};
1516                         mask[0] = rte_cpu_to_be_32((0xffffffff >>
1517                                                     (32 - width)) << off);
1518                 } else {
1519                         if (data->offset < 16)
1520                                 info[idx++] = (struct field_modify_info){2, 0,
1521                                                 MLX5_MODI_OUT_SMAC_15_0};
1522                         info[idx] = (struct field_modify_info){4, off,
1523                                                 MLX5_MODI_OUT_SMAC_47_16};
1524                 }
1525                 break;
1526         case RTE_FLOW_FIELD_VLAN_TYPE:
1527                 /* not supported yet */
1528                 break;
1529         case RTE_FLOW_FIELD_VLAN_ID:
1530                 info[idx] = (struct field_modify_info){2, 0,
1531                                         MLX5_MODI_OUT_FIRST_VID};
1532                 if (mask)
1533                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1534                 break;
1535         case RTE_FLOW_FIELD_MAC_TYPE:
1536                 info[idx] = (struct field_modify_info){2, 0,
1537                                         MLX5_MODI_OUT_ETHERTYPE};
1538                 if (mask)
1539                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1540                 break;
1541         case RTE_FLOW_FIELD_IPV4_DSCP:
1542                 info[idx] = (struct field_modify_info){1, 0,
1543                                         MLX5_MODI_OUT_IP_DSCP};
1544                 if (mask)
1545                         mask[idx] = 0x3f >> (6 - width);
1546                 break;
1547         case RTE_FLOW_FIELD_IPV4_TTL:
1548                 info[idx] = (struct field_modify_info){1, 0,
1549                                         MLX5_MODI_OUT_IPV4_TTL};
1550                 if (mask)
1551                         mask[idx] = 0xff >> (8 - width);
1552                 break;
1553         case RTE_FLOW_FIELD_IPV4_SRC:
1554                 info[idx] = (struct field_modify_info){4, 0,
1555                                         MLX5_MODI_OUT_SIPV4};
1556                 if (mask)
1557                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1558                                                      (32 - width));
1559                 break;
1560         case RTE_FLOW_FIELD_IPV4_DST:
1561                 info[idx] = (struct field_modify_info){4, 0,
1562                                         MLX5_MODI_OUT_DIPV4};
1563                 if (mask)
1564                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1565                                                      (32 - width));
1566                 break;
1567         case RTE_FLOW_FIELD_IPV6_DSCP:
1568                 info[idx] = (struct field_modify_info){1, 0,
1569                                         MLX5_MODI_OUT_IP_DSCP};
1570                 if (mask)
1571                         mask[idx] = 0x3f >> (6 - width);
1572                 break;
1573         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1574                 info[idx] = (struct field_modify_info){1, 0,
1575                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1576                 if (mask)
1577                         mask[idx] = 0xff >> (8 - width);
1578                 break;
1579         case RTE_FLOW_FIELD_IPV6_SRC:
1580                 if (mask) {
1581                         if (data->offset < 32) {
1582                                 info[idx] = (struct field_modify_info){4, 12,
1583                                                 MLX5_MODI_OUT_SIPV6_31_0};
1584                                 if (width < 32) {
1585                                         mask[3] =
1586                                                 rte_cpu_to_be_32(0xffffffff >>
1587                                                                  (32 - width));
1588                                         width = 0;
1589                                 } else {
1590                                         mask[3] = RTE_BE32(0xffffffff);
1591                                         width -= 32;
1592                                 }
1593                                 if (!width)
1594                                         break;
1595                                 ++idx;
1596                         }
1597                         if (data->offset < 64) {
1598                                 info[idx] = (struct field_modify_info){4, 8,
1599                                                 MLX5_MODI_OUT_SIPV6_63_32};
1600                                 if (width < 32) {
1601                                         mask[2] =
1602                                                 rte_cpu_to_be_32(0xffffffff >>
1603                                                                  (32 - width));
1604                                         width = 0;
1605                                 } else {
1606                                         mask[2] = RTE_BE32(0xffffffff);
1607                                         width -= 32;
1608                                 }
1609                                 if (!width)
1610                                         break;
1611                                 ++idx;
1612                         }
1613                         if (data->offset < 96) {
1614                                 info[idx] = (struct field_modify_info){4, 4,
1615                                                 MLX5_MODI_OUT_SIPV6_95_64};
1616                                 if (width < 32) {
1617                                         mask[1] =
1618                                                 rte_cpu_to_be_32(0xffffffff >>
1619                                                                  (32 - width));
1620                                         width = 0;
1621                                 } else {
1622                                         mask[1] = RTE_BE32(0xffffffff);
1623                                         width -= 32;
1624                                 }
1625                                 if (!width)
1626                                         break;
1627                                 ++idx;
1628                         }
1629                         info[idx] = (struct field_modify_info){4, 0,
1630                                                 MLX5_MODI_OUT_SIPV6_127_96};
1631                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1632                 } else {
1633                         if (data->offset < 32)
1634                                 info[idx++] = (struct field_modify_info){4, 0,
1635                                                 MLX5_MODI_OUT_SIPV6_31_0};
1636                         if (data->offset < 64)
1637                                 info[idx++] = (struct field_modify_info){4, 0,
1638                                                 MLX5_MODI_OUT_SIPV6_63_32};
1639                         if (data->offset < 96)
1640                                 info[idx++] = (struct field_modify_info){4, 0,
1641                                                 MLX5_MODI_OUT_SIPV6_95_64};
1642                         if (data->offset < 128)
1643                                 info[idx++] = (struct field_modify_info){4, 0,
1644                                                 MLX5_MODI_OUT_SIPV6_127_96};
1645                 }
1646                 break;
1647         case RTE_FLOW_FIELD_IPV6_DST:
1648                 if (mask) {
1649                         if (data->offset < 32) {
1650                                 info[idx] = (struct field_modify_info){4, 12,
1651                                                 MLX5_MODI_OUT_DIPV6_31_0};
1652                                 if (width < 32) {
1653                                         mask[3] =
1654                                                 rte_cpu_to_be_32(0xffffffff >>
1655                                                                  (32 - width));
1656                                         width = 0;
1657                                 } else {
1658                                         mask[3] = RTE_BE32(0xffffffff);
1659                                         width -= 32;
1660                                 }
1661                                 if (!width)
1662                                         break;
1663                                 ++idx;
1664                         }
1665                         if (data->offset < 64) {
1666                                 info[idx] = (struct field_modify_info){4, 8,
1667                                                 MLX5_MODI_OUT_DIPV6_63_32};
1668                                 if (width < 32) {
1669                                         mask[2] =
1670                                                 rte_cpu_to_be_32(0xffffffff >>
1671                                                                  (32 - width));
1672                                         width = 0;
1673                                 } else {
1674                                         mask[2] = RTE_BE32(0xffffffff);
1675                                         width -= 32;
1676                                 }
1677                                 if (!width)
1678                                         break;
1679                                 ++idx;
1680                         }
1681                         if (data->offset < 96) {
1682                                 info[idx] = (struct field_modify_info){4, 4,
1683                                                 MLX5_MODI_OUT_DIPV6_95_64};
1684                                 if (width < 32) {
1685                                         mask[1] =
1686                                                 rte_cpu_to_be_32(0xffffffff >>
1687                                                                  (32 - width));
1688                                         width = 0;
1689                                 } else {
1690                                         mask[1] = RTE_BE32(0xffffffff);
1691                                         width -= 32;
1692                                 }
1693                                 if (!width)
1694                                         break;
1695                                 ++idx;
1696                         }
1697                         info[idx] = (struct field_modify_info){4, 0,
1698                                                 MLX5_MODI_OUT_DIPV6_127_96};
1699                         mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
1700                 } else {
1701                         if (data->offset < 32)
1702                                 info[idx++] = (struct field_modify_info){4, 0,
1703                                                 MLX5_MODI_OUT_DIPV6_31_0};
1704                         if (data->offset < 64)
1705                                 info[idx++] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_63_32};
1707                         if (data->offset < 96)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_95_64};
1710                         if (data->offset < 128)
1711                                 info[idx++] = (struct field_modify_info){4, 0,
1712                                                 MLX5_MODI_OUT_DIPV6_127_96};
1713                 }
1714                 break;
1715         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1716                 info[idx] = (struct field_modify_info){2, 0,
1717                                         MLX5_MODI_OUT_TCP_SPORT};
1718                 if (mask)
1719                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1720                 break;
1721         case RTE_FLOW_FIELD_TCP_PORT_DST:
1722                 info[idx] = (struct field_modify_info){2, 0,
1723                                         MLX5_MODI_OUT_TCP_DPORT};
1724                 if (mask)
1725                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1726                 break;
1727         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1728                 info[idx] = (struct field_modify_info){4, 0,
1729                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1730                 if (mask)
1731                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1732                                                      (32 - width));
1733                 break;
1734         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1735                 info[idx] = (struct field_modify_info){4, 0,
1736                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1737                 if (mask)
1738                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1739                                                      (32 - width));
1740                 break;
1741         case RTE_FLOW_FIELD_TCP_FLAGS:
1742                 info[idx] = (struct field_modify_info){2, 0,
1743                                         MLX5_MODI_OUT_TCP_FLAGS};
1744                 if (mask)
1745                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1746                 break;
1747         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1748                 info[idx] = (struct field_modify_info){2, 0,
1749                                         MLX5_MODI_OUT_UDP_SPORT};
1750                 if (mask)
1751                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1752                 break;
1753         case RTE_FLOW_FIELD_UDP_PORT_DST:
1754                 info[idx] = (struct field_modify_info){2, 0,
1755                                         MLX5_MODI_OUT_UDP_DPORT};
1756                 if (mask)
1757                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1758                 break;
1759         case RTE_FLOW_FIELD_VXLAN_VNI:
1760                 /* not supported yet */
1761                 break;
1762         case RTE_FLOW_FIELD_GENEVE_VNI:
1763                 /* not supported yet*/
1764                 break;
1765         case RTE_FLOW_FIELD_GTP_TEID:
1766                 info[idx] = (struct field_modify_info){4, 0,
1767                                         MLX5_MODI_GTP_TEID};
1768                 if (mask)
1769                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1770                                                      (32 - width));
1771                 break;
1772         case RTE_FLOW_FIELD_TAG:
1773                 {
1774                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1775                                                    data->level, error);
1776                         if (reg < 0)
1777                                 return;
1778                         MLX5_ASSERT(reg != REG_NON);
1779                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1780                         info[idx] = (struct field_modify_info){4, 0,
1781                                                 reg_to_field[reg]};
1782                         if (mask)
1783                                 mask[idx] =
1784                                         rte_cpu_to_be_32(0xffffffff >>
1785                                                          (32 - width));
1786                 }
1787                 break;
1788         case RTE_FLOW_FIELD_MARK:
1789                 {
1790                         uint32_t mark_mask = priv->sh->dv_mark_mask;
1791                         uint32_t mark_count = __builtin_popcount(mark_mask);
1792                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1793                                                        0, error);
1794                         if (reg < 0)
1795                                 return;
1796                         MLX5_ASSERT(reg != REG_NON);
1797                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1798                         info[idx] = (struct field_modify_info){4, 0,
1799                                                 reg_to_field[reg]};
1800                         if (mask)
1801                                 mask[idx] = rte_cpu_to_be_32((mark_mask >>
1802                                          (mark_count - width)) & mark_mask);
1803                 }
1804                 break;
1805         case RTE_FLOW_FIELD_META:
1806                 {
1807                         uint32_t meta_mask = priv->sh->dv_meta_mask;
1808                         uint32_t meta_count = __builtin_popcount(meta_mask);
1809                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1810                         if (reg < 0)
1811                                 return;
1812                         MLX5_ASSERT(reg != REG_NON);
1813                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1814                         info[idx] = (struct field_modify_info){4, 0,
1815                                                 reg_to_field[reg]};
1816                         if (mask)
1817                                 mask[idx] = rte_cpu_to_be_32((meta_mask >>
1818                                         (meta_count - width)) & meta_mask);
1819                 }
1820                 break;
1821         case RTE_FLOW_FIELD_POINTER:
1822         case RTE_FLOW_FIELD_VALUE:
1823         default:
1824                 MLX5_ASSERT(false);
1825                 break;
1826         }
1827 }
1828
1829 /**
1830  * Convert modify_field action to DV specification.
1831  *
1832  * @param[in] dev
1833  *   Pointer to the rte_eth_dev structure.
1834  * @param[in,out] resource
1835  *   Pointer to the modify-header resource.
1836  * @param[in] action
1837  *   Pointer to action specification.
1838  * @param[in] attr
1839  *   Attributes of flow that includes this item.
1840  * @param[out] error
1841  *   Pointer to the error structure.
1842  *
1843  * @return
1844  *   0 on success, a negative errno value otherwise and rte_errno is set.
1845  */
1846 static int
1847 flow_dv_convert_action_modify_field
1848                         (struct rte_eth_dev *dev,
1849                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1850                          const struct rte_flow_action *action,
1851                          const struct rte_flow_attr *attr,
1852                          struct rte_flow_error *error)
1853 {
1854         const struct rte_flow_action_modify_field *conf =
1855                 (const struct rte_flow_action_modify_field *)(action->conf);
1856         struct rte_flow_item item = {
1857                 .spec = NULL,
1858                 .mask = NULL
1859         };
1860         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1861                                                                 {0, 0, 0} };
1862         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1863                                                                 {0, 0, 0} };
1864         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1865         uint32_t type, meta = 0;
1866
1867         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1868             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1869                 type = MLX5_MODIFICATION_TYPE_SET;
1870                 /** For SET fill the destination field (field) first. */
1871                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1872                                                   conf->width, dev,
1873                                                   attr, error);
1874                 item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1875                                         (void *)(uintptr_t)conf->src.pvalue :
1876                                         (void *)(uintptr_t)&conf->src.value;
1877                 if (conf->dst.field == RTE_FLOW_FIELD_META) {
1878                         meta = *(const unaligned_uint32_t *)item.spec;
1879                         meta = rte_cpu_to_be_32(meta);
1880                         item.spec = &meta;
1881                 }
1882         } else {
1883                 type = MLX5_MODIFICATION_TYPE_COPY;
1884                 /** For COPY fill the destination field (dcopy) without mask. */
1885                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1886                                                   conf->width, dev,
1887                                                   attr, error);
1888                 /** Then construct the source field (field) with mask. */
1889                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1890                                                   conf->width, dev,
1891                                                   attr, error);
1892         }
1893         item.mask = &mask;
1894         return flow_dv_convert_modify_action(&item,
1895                         field, dcopy, resource, type, error);
1896 }
1897
1898 /**
1899  * Validate MARK item.
1900  *
1901  * @param[in] dev
1902  *   Pointer to the rte_eth_dev structure.
1903  * @param[in] item
1904  *   Item specification.
1905  * @param[in] attr
1906  *   Attributes of flow that includes this item.
1907  * @param[out] error
1908  *   Pointer to error structure.
1909  *
1910  * @return
1911  *   0 on success, a negative errno value otherwise and rte_errno is set.
1912  */
1913 static int
1914 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1915                            const struct rte_flow_item *item,
1916                            const struct rte_flow_attr *attr __rte_unused,
1917                            struct rte_flow_error *error)
1918 {
1919         struct mlx5_priv *priv = dev->data->dev_private;
1920         struct mlx5_dev_config *config = &priv->config;
1921         const struct rte_flow_item_mark *spec = item->spec;
1922         const struct rte_flow_item_mark *mask = item->mask;
1923         const struct rte_flow_item_mark nic_mask = {
1924                 .id = priv->sh->dv_mark_mask,
1925         };
1926         int ret;
1927
1928         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1929                 return rte_flow_error_set(error, ENOTSUP,
1930                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1931                                           "extended metadata feature"
1932                                           " isn't enabled");
1933         if (!mlx5_flow_ext_mreg_supported(dev))
1934                 return rte_flow_error_set(error, ENOTSUP,
1935                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1936                                           "extended metadata register"
1937                                           " isn't supported");
1938         if (!nic_mask.id)
1939                 return rte_flow_error_set(error, ENOTSUP,
1940                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1941                                           "extended metadata register"
1942                                           " isn't available");
1943         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1944         if (ret < 0)
1945                 return ret;
1946         if (!spec)
1947                 return rte_flow_error_set(error, EINVAL,
1948                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1949                                           item->spec,
1950                                           "data cannot be empty");
1951         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1952                 return rte_flow_error_set(error, EINVAL,
1953                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1954                                           &spec->id,
1955                                           "mark id exceeds the limit");
1956         if (!mask)
1957                 mask = &nic_mask;
1958         if (!mask->id)
1959                 return rte_flow_error_set(error, EINVAL,
1960                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1961                                         "mask cannot be zero");
1962
1963         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1964                                         (const uint8_t *)&nic_mask,
1965                                         sizeof(struct rte_flow_item_mark),
1966                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1967         if (ret < 0)
1968                 return ret;
1969         return 0;
1970 }
1971
1972 /**
1973  * Validate META item.
1974  *
1975  * @param[in] dev
1976  *   Pointer to the rte_eth_dev structure.
1977  * @param[in] item
1978  *   Item specification.
1979  * @param[in] attr
1980  *   Attributes of flow that includes this item.
1981  * @param[out] error
1982  *   Pointer to error structure.
1983  *
1984  * @return
1985  *   0 on success, a negative errno value otherwise and rte_errno is set.
1986  */
1987 static int
1988 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1989                            const struct rte_flow_item *item,
1990                            const struct rte_flow_attr *attr,
1991                            struct rte_flow_error *error)
1992 {
1993         struct mlx5_priv *priv = dev->data->dev_private;
1994         struct mlx5_dev_config *config = &priv->config;
1995         const struct rte_flow_item_meta *spec = item->spec;
1996         const struct rte_flow_item_meta *mask = item->mask;
1997         struct rte_flow_item_meta nic_mask = {
1998                 .data = UINT32_MAX
1999         };
2000         int reg;
2001         int ret;
2002
2003         if (!spec)
2004                 return rte_flow_error_set(error, EINVAL,
2005                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2006                                           item->spec,
2007                                           "data cannot be empty");
2008         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2009                 if (!mlx5_flow_ext_mreg_supported(dev))
2010                         return rte_flow_error_set(error, ENOTSUP,
2011                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2012                                           "extended metadata register"
2013                                           " isn't supported");
2014                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2015                 if (reg < 0)
2016                         return reg;
2017                 if (reg == REG_NON)
2018                         return rte_flow_error_set(error, ENOTSUP,
2019                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2020                                         "unavailable extended metadata register");
2021                 if (reg == REG_B)
2022                         return rte_flow_error_set(error, ENOTSUP,
2023                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2024                                           "match on reg_b "
2025                                           "isn't supported");
2026                 if (reg != REG_A)
2027                         nic_mask.data = priv->sh->dv_meta_mask;
2028         } else {
2029                 if (attr->transfer)
2030                         return rte_flow_error_set(error, ENOTSUP,
2031                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2032                                         "extended metadata feature "
2033                                         "should be enabled when "
2034                                         "meta item is requested "
2035                                         "with e-switch mode ");
2036                 if (attr->ingress)
2037                         return rte_flow_error_set(error, ENOTSUP,
2038                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2039                                         "match on metadata for ingress "
2040                                         "is not supported in legacy "
2041                                         "metadata mode");
2042         }
2043         if (!mask)
2044                 mask = &rte_flow_item_meta_mask;
2045         if (!mask->data)
2046                 return rte_flow_error_set(error, EINVAL,
2047                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2048                                         "mask cannot be zero");
2049
2050         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2051                                         (const uint8_t *)&nic_mask,
2052                                         sizeof(struct rte_flow_item_meta),
2053                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2054         return ret;
2055 }
2056
2057 /**
2058  * Validate TAG item.
2059  *
2060  * @param[in] dev
2061  *   Pointer to the rte_eth_dev structure.
2062  * @param[in] item
2063  *   Item specification.
2064  * @param[in] attr
2065  *   Attributes of flow that includes this item.
2066  * @param[out] error
2067  *   Pointer to error structure.
2068  *
2069  * @return
2070  *   0 on success, a negative errno value otherwise and rte_errno is set.
2071  */
2072 static int
2073 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2074                           const struct rte_flow_item *item,
2075                           const struct rte_flow_attr *attr __rte_unused,
2076                           struct rte_flow_error *error)
2077 {
2078         const struct rte_flow_item_tag *spec = item->spec;
2079         const struct rte_flow_item_tag *mask = item->mask;
2080         const struct rte_flow_item_tag nic_mask = {
2081                 .data = RTE_BE32(UINT32_MAX),
2082                 .index = 0xff,
2083         };
2084         int ret;
2085
2086         if (!mlx5_flow_ext_mreg_supported(dev))
2087                 return rte_flow_error_set(error, ENOTSUP,
2088                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2089                                           "extensive metadata register"
2090                                           " isn't supported");
2091         if (!spec)
2092                 return rte_flow_error_set(error, EINVAL,
2093                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2094                                           item->spec,
2095                                           "data cannot be empty");
2096         if (!mask)
2097                 mask = &rte_flow_item_tag_mask;
2098         if (!mask->data)
2099                 return rte_flow_error_set(error, EINVAL,
2100                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2101                                         "mask cannot be zero");
2102
2103         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2104                                         (const uint8_t *)&nic_mask,
2105                                         sizeof(struct rte_flow_item_tag),
2106                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2107         if (ret < 0)
2108                 return ret;
2109         if (mask->index != 0xff)
2110                 return rte_flow_error_set(error, EINVAL,
2111                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2112                                           "partial mask for tag index"
2113                                           " is not supported");
2114         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2115         if (ret < 0)
2116                 return ret;
2117         MLX5_ASSERT(ret != REG_NON);
2118         return 0;
2119 }
2120
2121 /**
2122  * Validate vport item.
2123  *
2124  * @param[in] dev
2125  *   Pointer to the rte_eth_dev structure.
2126  * @param[in] item
2127  *   Item specification.
2128  * @param[in] attr
2129  *   Attributes of flow that includes this item.
2130  * @param[in] item_flags
2131  *   Bit-fields that holds the items detected until now.
2132  * @param[out] error
2133  *   Pointer to error structure.
2134  *
2135  * @return
2136  *   0 on success, a negative errno value otherwise and rte_errno is set.
2137  */
2138 static int
2139 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2140                               const struct rte_flow_item *item,
2141                               const struct rte_flow_attr *attr,
2142                               uint64_t item_flags,
2143                               struct rte_flow_error *error)
2144 {
2145         const struct rte_flow_item_port_id *spec = item->spec;
2146         const struct rte_flow_item_port_id *mask = item->mask;
2147         const struct rte_flow_item_port_id switch_mask = {
2148                         .id = 0xffffffff,
2149         };
2150         struct mlx5_priv *esw_priv;
2151         struct mlx5_priv *dev_priv;
2152         int ret;
2153
2154         if (!attr->transfer)
2155                 return rte_flow_error_set(error, EINVAL,
2156                                           RTE_FLOW_ERROR_TYPE_ITEM,
2157                                           NULL,
2158                                           "match on port id is valid only"
2159                                           " when transfer flag is enabled");
2160         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2161                 return rte_flow_error_set(error, ENOTSUP,
2162                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2163                                           "multiple source ports are not"
2164                                           " supported");
2165         if (!mask)
2166                 mask = &switch_mask;
2167         if (mask->id != 0xffffffff)
2168                 return rte_flow_error_set(error, ENOTSUP,
2169                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2170                                            mask,
2171                                            "no support for partial mask on"
2172                                            " \"id\" field");
2173         ret = mlx5_flow_item_acceptable
2174                                 (item, (const uint8_t *)mask,
2175                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2176                                  sizeof(struct rte_flow_item_port_id),
2177                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2178         if (ret)
2179                 return ret;
2180         if (!spec)
2181                 return 0;
2182         if (spec->id == MLX5_PORT_ESW_MGR)
2183                 return 0;
2184         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2185         if (!esw_priv)
2186                 return rte_flow_error_set(error, rte_errno,
2187                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2188                                           "failed to obtain E-Switch info for"
2189                                           " port");
2190         dev_priv = mlx5_dev_to_eswitch_info(dev);
2191         if (!dev_priv)
2192                 return rte_flow_error_set(error, rte_errno,
2193                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2194                                           NULL,
2195                                           "failed to obtain E-Switch info");
2196         if (esw_priv->domain_id != dev_priv->domain_id)
2197                 return rte_flow_error_set(error, EINVAL,
2198                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2199                                           "cannot match on a port from a"
2200                                           " different E-Switch");
2201         return 0;
2202 }
2203
2204 /**
2205  * Validate VLAN item.
2206  *
2207  * @param[in] item
2208  *   Item specification.
2209  * @param[in] item_flags
2210  *   Bit-fields that holds the items detected until now.
2211  * @param[in] dev
2212  *   Ethernet device flow is being created on.
2213  * @param[out] error
2214  *   Pointer to error structure.
2215  *
2216  * @return
2217  *   0 on success, a negative errno value otherwise and rte_errno is set.
2218  */
2219 static int
2220 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2221                            uint64_t item_flags,
2222                            struct rte_eth_dev *dev,
2223                            struct rte_flow_error *error)
2224 {
2225         const struct rte_flow_item_vlan *mask = item->mask;
2226         const struct rte_flow_item_vlan nic_mask = {
2227                 .tci = RTE_BE16(UINT16_MAX),
2228                 .inner_type = RTE_BE16(UINT16_MAX),
2229                 .has_more_vlan = 1,
2230         };
2231         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2232         int ret;
2233         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2234                                         MLX5_FLOW_LAYER_INNER_L4) :
2235                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2236                                         MLX5_FLOW_LAYER_OUTER_L4);
2237         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2238                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2239
2240         if (item_flags & vlanm)
2241                 return rte_flow_error_set(error, EINVAL,
2242                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2243                                           "multiple VLAN layers not supported");
2244         else if ((item_flags & l34m) != 0)
2245                 return rte_flow_error_set(error, EINVAL,
2246                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2247                                           "VLAN cannot follow L3/L4 layer");
2248         if (!mask)
2249                 mask = &rte_flow_item_vlan_mask;
2250         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2251                                         (const uint8_t *)&nic_mask,
2252                                         sizeof(struct rte_flow_item_vlan),
2253                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2254         if (ret)
2255                 return ret;
2256         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2257                 struct mlx5_priv *priv = dev->data->dev_private;
2258
2259                 if (priv->vmwa_context) {
2260                         /*
2261                          * Non-NULL context means we have a virtual machine
2262                          * and SR-IOV enabled, we have to create VLAN interface
2263                          * to make hypervisor to setup E-Switch vport
2264                          * context correctly. We avoid creating the multiple
2265                          * VLAN interfaces, so we cannot support VLAN tag mask.
2266                          */
2267                         return rte_flow_error_set(error, EINVAL,
2268                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2269                                                   item,
2270                                                   "VLAN tag mask is not"
2271                                                   " supported in virtual"
2272                                                   " environment");
2273                 }
2274         }
2275         return 0;
2276 }
2277
2278 /*
2279  * GTP flags are contained in 1 byte of the format:
2280  * -------------------------------------------
2281  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2282  * |-----------------------------------------|
2283  * | value | Version | PT | Res | E | S | PN |
2284  * -------------------------------------------
2285  *
2286  * Matching is supported only for GTP flags E, S, PN.
2287  */
2288 #define MLX5_GTP_FLAGS_MASK     0x07
2289
2290 /**
2291  * Validate GTP item.
2292  *
2293  * @param[in] dev
2294  *   Pointer to the rte_eth_dev structure.
2295  * @param[in] item
2296  *   Item specification.
2297  * @param[in] item_flags
2298  *   Bit-fields that holds the items detected until now.
2299  * @param[out] error
2300  *   Pointer to error structure.
2301  *
2302  * @return
2303  *   0 on success, a negative errno value otherwise and rte_errno is set.
2304  */
2305 static int
2306 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2307                           const struct rte_flow_item *item,
2308                           uint64_t item_flags,
2309                           struct rte_flow_error *error)
2310 {
2311         struct mlx5_priv *priv = dev->data->dev_private;
2312         const struct rte_flow_item_gtp *spec = item->spec;
2313         const struct rte_flow_item_gtp *mask = item->mask;
2314         const struct rte_flow_item_gtp nic_mask = {
2315                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2316                 .msg_type = 0xff,
2317                 .teid = RTE_BE32(0xffffffff),
2318         };
2319
2320         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2321                 return rte_flow_error_set(error, ENOTSUP,
2322                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2323                                           "GTP support is not enabled");
2324         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2325                 return rte_flow_error_set(error, ENOTSUP,
2326                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2327                                           "multiple tunnel layers not"
2328                                           " supported");
2329         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2330                 return rte_flow_error_set(error, EINVAL,
2331                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2332                                           "no outer UDP layer found");
2333         if (!mask)
2334                 mask = &rte_flow_item_gtp_mask;
2335         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2336                 return rte_flow_error_set(error, ENOTSUP,
2337                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2338                                           "Match is supported for GTP"
2339                                           " flags only");
2340         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2341                                          (const uint8_t *)&nic_mask,
2342                                          sizeof(struct rte_flow_item_gtp),
2343                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2344 }
2345
2346 /**
2347  * Validate GTP PSC item.
2348  *
2349  * @param[in] item
2350  *   Item specification.
2351  * @param[in] last_item
2352  *   Previous validated item in the pattern items.
2353  * @param[in] gtp_item
2354  *   Previous GTP item specification.
2355  * @param[in] attr
2356  *   Pointer to flow attributes.
2357  * @param[out] error
2358  *   Pointer to error structure.
2359  *
2360  * @return
2361  *   0 on success, a negative errno value otherwise and rte_errno is set.
2362  */
2363 static int
2364 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2365                               uint64_t last_item,
2366                               const struct rte_flow_item *gtp_item,
2367                               const struct rte_flow_attr *attr,
2368                               struct rte_flow_error *error)
2369 {
2370         const struct rte_flow_item_gtp *gtp_spec;
2371         const struct rte_flow_item_gtp *gtp_mask;
2372         const struct rte_flow_item_gtp_psc *mask;
2373         const struct rte_flow_item_gtp_psc nic_mask = {
2374                 .hdr.type = 0xF,
2375                 .hdr.qfi = 0x3F,
2376         };
2377
2378         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2379                 return rte_flow_error_set
2380                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2381                          "GTP PSC item must be preceded with GTP item");
2382         gtp_spec = gtp_item->spec;
2383         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2384         /* GTP spec and E flag is requested to match zero. */
2385         if (gtp_spec &&
2386                 (gtp_mask->v_pt_rsv_flags &
2387                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2388                 return rte_flow_error_set
2389                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2390                          "GTP E flag must be 1 to match GTP PSC");
2391         /* Check the flow is not created in group zero. */
2392         if (!attr->transfer && !attr->group)
2393                 return rte_flow_error_set
2394                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2395                          "GTP PSC is not supported for group 0");
2396         /* GTP spec is here and E flag is requested to match zero. */
2397         if (!item->spec)
2398                 return 0;
2399         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2400         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2401                                          (const uint8_t *)&nic_mask,
2402                                          sizeof(struct rte_flow_item_gtp_psc),
2403                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2404 }
2405
/**
 * Validate IPV4 item.
 * Use existing validation function mlx5_flow_validate_item_ipv4(), and
 * add specific validation of fragment_offset field,
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   uint64_t item_flags, uint64_t last_item,
			   uint16_t ether_type, struct rte_flow_error *error)
{
	int ret;
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *last = item->last;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	rte_be16_t fragment_offset_spec = 0;
	rte_be16_t fragment_offset_last = 0;
	/* Fields of the IPv4 header this PMD can match on. */
	struct rte_flow_item_ipv4 nic_ipv4_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.fragment_offset = RTE_BE16(0xffff),
			.next_proto_id = 0xff,
			.time_to_live = 0xff,
		},
	};

	/*
	 * IHL matching is a separate device capability, checked per
	 * inner/outer header level before the mask bits are accepted.
	 */
	if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
			       priv->config.hca_attr.inner_ipv4_ihl;
		if (!ihl_cap)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "IPV4 ihl offload not supported");
		nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
	}
	/* Generic IPv4 validation first; fragment_offset is checked below. */
	ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
					   ether_type, &nic_ipv4_mask,
					   MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret < 0)
		return ret;
	if (spec && mask)
		fragment_offset_spec = spec->hdr.fragment_offset &
				       mask->hdr.fragment_offset;
	/* No (masked) fragment_offset match requested - nothing more to do. */
	if (!fragment_offset_spec)
		return 0;
	/*
	 * spec and mask are valid, enforce using full mask to make sure the
	 * complete value is used correctly.
	 */
	if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
			!= RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  item, "must use full mask for"
					  " fragment_offset");
	/*
	 * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
	 * indicating this is 1st fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate error message.
	 */
	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on first fragment not "
					  "supported");
	/* Any other exact value (no "last" range) is unsupported. */
	if (fragment_offset_spec && !last)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "specified value not supported");
	/* spec and last are valid, validate the specified range. */
	fragment_offset_last = last->hdr.fragment_offset &
			       mask->hdr.fragment_offset;
	/*
	 * Match on fragment_offset spec 0x2001 and last 0x3fff
	 * means MF is 1 and frag-offset is > 0.
	 * This packet is fragment 2nd and onward, excluding last.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
	    fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on following "
					  "fragments not supported");
	/*
	 * Match on fragment_offset spec 0x0001 and last 0x1fff
	 * means MF is 0 and frag-offset is > 0.
	 * This packet is last fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (fragment_offset_spec == RTE_BE16(1) &&
	    fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on last "
					  "fragment not supported");
	/*
	 * Match on fragment_offset spec 0x0001 and last 0x3fff
	 * means MF and/or frag-offset is not 0.
	 * This is a fragmented packet.
	 * Other range values are invalid and rejected.
	 */
	if (!(fragment_offset_spec == RTE_BE16(1) &&
	      fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
					  "specified range not supported");
	return 0;
}
2532
/**
 * Validate IPV6 fragment extension item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
				    uint64_t item_flags,
				    struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
	const struct rte_flow_item_ipv6_frag_ext *last = item->last;
	const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
	rte_be16_t frag_data_spec = 0;
	rte_be16_t frag_data_last = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	/* L4 layer bit for the current (inner/outer) header level. */
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret = 0;
	/* Fields of the fragment extension header the PMD can match on. */
	struct rte_flow_item_ipv6_frag_ext nic_mask = {
		.hdr = {
			.next_header = 0xff,
			.frag_data = RTE_BE16(0xffff),
		},
	};

	/* An extension header cannot appear after the L4 header. */
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item cannot "
					  "follow L4 item.");
	/* It must directly extend an IPv6 item at the same level. */
	if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
	    (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item must "
					  "follow ipv6 item");
	if (spec && mask)
		frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
	/* No (masked) frag_data match requested - nothing more to check. */
	if (!frag_data_spec)
		return 0;
	/*
	 * spec and mask are valid, enforce using full mask to make sure the
	 * complete value is used correctly.
	 */
	if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
				RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  item, "must use full mask for"
					  " frag_data");
	/*
	 * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
	 * This is 1st fragment of fragmented packet.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on first fragment not "
					  "supported");
	/* Any other exact value (no "last" range) is unsupported. */
	if (frag_data_spec && !last)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "specified value not supported");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&nic_mask,
				 sizeof(struct rte_flow_item_ipv6_frag_ext),
				 MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret)
		return ret;
	/* spec and last are valid, validate the specified range. */
	frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
	/*
	 * Match on frag_data spec 0x0009 and last 0xfff9
	 * means M is 1 and frag-offset is > 0.
	 * This packet is fragment 2nd and onward, excluding last.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
				       RTE_IPV6_EHDR_MF_MASK) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on following "
					  "fragments not supported");
	/*
	 * Match on frag_data spec 0x0008 and last 0xfff8
	 * means M is 0 and frag-offset is > 0.
	 * This packet is last fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on last "
					  "fragment not supported");
	/* Other range values are invalid and rejected. */
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
				  "specified range not supported");
}
2646
2647 /*
2648  * Validate ASO CT item.
2649  *
2650  * @param[in] dev
2651  *   Pointer to the rte_eth_dev structure.
2652  * @param[in] item
2653  *   Item specification.
2654  * @param[in] item_flags
2655  *   Pointer to bit-fields that holds the items detected until now.
2656  * @param[out] error
2657  *   Pointer to error structure.
2658  *
2659  * @return
2660  *   0 on success, a negative errno value otherwise and rte_errno is set.
2661  */
2662 static int
2663 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2664                              const struct rte_flow_item *item,
2665                              uint64_t *item_flags,
2666                              struct rte_flow_error *error)
2667 {
2668         const struct rte_flow_item_conntrack *spec = item->spec;
2669         const struct rte_flow_item_conntrack *mask = item->mask;
2670         RTE_SET_USED(dev);
2671         uint32_t flags;
2672
2673         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2674                 return rte_flow_error_set(error, EINVAL,
2675                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2676                                           "Only one CT is supported");
2677         if (!mask)
2678                 mask = &rte_flow_item_conntrack_mask;
2679         flags = spec->flags & mask->flags;
2680         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2681             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2682              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2683              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2684                 return rte_flow_error_set(error, EINVAL,
2685                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2686                                           "Conflict status bits");
2687         /* State change also needs to be considered. */
2688         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2689         return 0;
2690 }
2691
2692 /**
2693  * Validate the pop VLAN action.
2694  *
2695  * @param[in] dev
2696  *   Pointer to the rte_eth_dev structure.
2697  * @param[in] action_flags
2698  *   Holds the actions detected until now.
2699  * @param[in] action
2700  *   Pointer to the pop vlan action.
2701  * @param[in] item_flags
2702  *   The items found in this flow rule.
2703  * @param[in] attr
2704  *   Pointer to flow attributes.
2705  * @param[out] error
2706  *   Pointer to error structure.
2707  *
2708  * @return
2709  *   0 on success, a negative errno value otherwise and rte_errno is set.
2710  */
2711 static int
2712 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2713                                  uint64_t action_flags,
2714                                  const struct rte_flow_action *action,
2715                                  uint64_t item_flags,
2716                                  const struct rte_flow_attr *attr,
2717                                  struct rte_flow_error *error)
2718 {
2719         const struct mlx5_priv *priv = dev->data->dev_private;
2720         struct mlx5_dev_ctx_shared *sh = priv->sh;
2721         bool direction_error = false;
2722
2723         if (!priv->sh->pop_vlan_action)
2724                 return rte_flow_error_set(error, ENOTSUP,
2725                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2726                                           NULL,
2727                                           "pop vlan action is not supported");
2728         /* Pop VLAN is not supported in egress except for CX6 FDB mode. */
2729         if (attr->transfer) {
2730                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2731                 bool is_cx5 = sh->steering_format_version ==
2732                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2733
2734                 if (fdb_tx && is_cx5)
2735                         direction_error = true;
2736         } else if (attr->egress) {
2737                 direction_error = true;
2738         }
2739         if (direction_error)
2740                 return rte_flow_error_set(error, ENOTSUP,
2741                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2742                                           NULL,
2743                                           "pop vlan action not supported for egress");
2744         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2745                 return rte_flow_error_set(error, ENOTSUP,
2746                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2747                                           "no support for multiple VLAN "
2748                                           "actions");
2749         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2750         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2751             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2752                 return rte_flow_error_set(error, ENOTSUP,
2753                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2754                                           NULL,
2755                                           "cannot pop vlan after decap without "
2756                                           "match on inner vlan in the flow");
2757         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2758         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2759             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2760                 return rte_flow_error_set(error, ENOTSUP,
2761                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2762                                           NULL,
2763                                           "cannot pop vlan without a "
2764                                           "match on (outer) vlan in the flow");
2765         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2766                 return rte_flow_error_set(error, EINVAL,
2767                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2768                                           "wrong action order, port_id should "
2769                                           "be after pop VLAN action");
2770         if (!attr->transfer && priv->representor)
2771                 return rte_flow_error_set(error, ENOTSUP,
2772                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2773                                           "pop vlan action for VF representor "
2774                                           "not supported on NIC table");
2775         return 0;
2776 }
2777
2778 /**
2779  * Get VLAN default info from vlan match info.
2780  *
2781  * @param[in] items
2782  *   the list of item specifications.
2783  * @param[out] vlan
 *   Pointer to the VLAN header info to fill in.
 *
 * The function returns nothing; @p vlan is left unchanged when no VLAN item
 * carrying a spec is found in the pattern.
2788  */
2789 static void
2790 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2791                                   struct rte_vlan_hdr *vlan)
2792 {
2793         const struct rte_flow_item_vlan nic_mask = {
2794                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2795                                 MLX5DV_FLOW_VLAN_VID_MASK),
2796                 .inner_type = RTE_BE16(0xffff),
2797         };
2798
2799         if (items == NULL)
2800                 return;
2801         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2802                 int type = items->type;
2803
2804                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2805                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2806                         break;
2807         }
2808         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2809                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2810                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2811
2812                 /* If VLAN item in pattern doesn't contain data, return here. */
2813                 if (!vlan_v)
2814                         return;
2815                 if (!vlan_m)
2816                         vlan_m = &nic_mask;
2817                 /* Only full match values are accepted */
2818                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2819                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2820                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2821                         vlan->vlan_tci |=
2822                                 rte_be_to_cpu_16(vlan_v->tci &
2823                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2824                 }
2825                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2826                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2827                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2828                         vlan->vlan_tci |=
2829                                 rte_be_to_cpu_16(vlan_v->tci &
2830                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2831                 }
2832                 if (vlan_m->inner_type == nic_mask.inner_type)
2833                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2834                                                            vlan_m->inner_type);
2835         }
2836 }
2837
2838 /**
2839  * Validate the push VLAN action.
2840  *
2841  * @param[in] dev
2842  *   Pointer to the rte_eth_dev structure.
2843  * @param[in] action_flags
2844  *   Holds the actions detected until now.
 * @param[in] vlan_m
 *   VLAN item mask from the pattern, or NULL if no VLAN item is present.
2847  * @param[in] action
2848  *   Pointer to the action structure.
2849  * @param[in] attr
2850  *   Pointer to flow attributes
2851  * @param[out] error
2852  *   Pointer to error structure.
2853  *
2854  * @return
2855  *   0 on success, a negative errno value otherwise and rte_errno is set.
2856  */
2857 static int
2858 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2859                                   uint64_t action_flags,
2860                                   const struct rte_flow_item_vlan *vlan_m,
2861                                   const struct rte_flow_action *action,
2862                                   const struct rte_flow_attr *attr,
2863                                   struct rte_flow_error *error)
2864 {
2865         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2866         const struct mlx5_priv *priv = dev->data->dev_private;
2867         struct mlx5_dev_ctx_shared *sh = priv->sh;
2868         bool direction_error = false;
2869
2870         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2871             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2872                 return rte_flow_error_set(error, EINVAL,
2873                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2874                                           "invalid vlan ethertype");
2875         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2876                 return rte_flow_error_set(error, EINVAL,
2877                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2878                                           "wrong action order, port_id should "
2879                                           "be after push VLAN");
2880         /* Push VLAN is not supported in ingress except for CX6 FDB mode. */
2881         if (attr->transfer) {
2882                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2883                 bool is_cx5 = sh->steering_format_version ==
2884                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2885
2886                 if (!fdb_tx && is_cx5)
2887                         direction_error = true;
2888         } else if (attr->ingress) {
2889                 direction_error = true;
2890         }
2891         if (direction_error)
2892                 return rte_flow_error_set(error, ENOTSUP,
2893                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2894                                           NULL,
2895                                           "push vlan action not supported for ingress");
2896         if (!attr->transfer && priv->representor)
2897                 return rte_flow_error_set(error, ENOTSUP,
2898                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2899                                           "push vlan action for VF representor "
2900                                           "not supported on NIC table");
2901         if (vlan_m &&
2902             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2903             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2904                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2905             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2906             !(mlx5_flow_find_action
2907                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2908                 return rte_flow_error_set(error, EINVAL,
2909                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2910                                           "not full match mask on VLAN PCP and "
2911                                           "there is no of_set_vlan_pcp action, "
2912                                           "push VLAN action cannot figure out "
2913                                           "PCP value");
2914         if (vlan_m &&
2915             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2916             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2917                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2918             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2919             !(mlx5_flow_find_action
2920                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2921                 return rte_flow_error_set(error, EINVAL,
2922                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2923                                           "not full match mask on VLAN VID and "
2924                                           "there is no of_set_vlan_vid action, "
2925                                           "push VLAN action cannot figure out "
2926                                           "VID value");
2927         (void)attr;
2928         return 0;
2929 }
2930
2931 /**
2932  * Validate the set VLAN PCP.
2933  *
2934  * @param[in] action_flags
2935  *   Holds the actions detected until now.
2936  * @param[in] actions
2937  *   Pointer to the list of actions remaining in the flow rule.
2938  * @param[out] error
2939  *   Pointer to error structure.
2940  *
2941  * @return
2942  *   0 on success, a negative errno value otherwise and rte_errno is set.
2943  */
2944 static int
2945 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2946                                      const struct rte_flow_action actions[],
2947                                      struct rte_flow_error *error)
2948 {
2949         const struct rte_flow_action *action = actions;
2950         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2951
2952         if (conf->vlan_pcp > 7)
2953                 return rte_flow_error_set(error, EINVAL,
2954                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2955                                           "VLAN PCP value is too big");
2956         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2957                 return rte_flow_error_set(error, ENOTSUP,
2958                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2959                                           "set VLAN PCP action must follow "
2960                                           "the push VLAN action");
2961         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2962                 return rte_flow_error_set(error, ENOTSUP,
2963                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2964                                           "Multiple VLAN PCP modification are "
2965                                           "not supported");
2966         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2967                 return rte_flow_error_set(error, EINVAL,
2968                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2969                                           "wrong action order, port_id should "
2970                                           "be after set VLAN PCP");
2971         return 0;
2972 }
2973
2974 /**
2975  * Validate the set VLAN VID.
2976  *
2977  * @param[in] item_flags
2978  *   Holds the items detected in this rule.
2979  * @param[in] action_flags
2980  *   Holds the actions detected until now.
2981  * @param[in] actions
2982  *   Pointer to the list of actions remaining in the flow rule.
2983  * @param[out] error
2984  *   Pointer to error structure.
2985  *
2986  * @return
2987  *   0 on success, a negative errno value otherwise and rte_errno is set.
2988  */
2989 static int
2990 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2991                                      uint64_t action_flags,
2992                                      const struct rte_flow_action actions[],
2993                                      struct rte_flow_error *error)
2994 {
2995         const struct rte_flow_action *action = actions;
2996         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2997
2998         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2999                 return rte_flow_error_set(error, EINVAL,
3000                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3001                                           "VLAN VID value is too big");
3002         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3003             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3004                 return rte_flow_error_set(error, ENOTSUP,
3005                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3006                                           "set VLAN VID action must follow push"
3007                                           " VLAN action or match on VLAN item");
3008         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3009                 return rte_flow_error_set(error, ENOTSUP,
3010                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3011                                           "Multiple VLAN VID modifications are "
3012                                           "not supported");
3013         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3014                 return rte_flow_error_set(error, EINVAL,
3015                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3016                                           "wrong action order, port_id should "
3017                                           "be after set VLAN VID");
3018         return 0;
3019 }
3020
3021 /*
3022  * Validate the FLAG action.
3023  *
3024  * @param[in] dev
3025  *   Pointer to the rte_eth_dev structure.
3026  * @param[in] action_flags
3027  *   Holds the actions detected until now.
3028  * @param[in] attr
3029  *   Pointer to flow attributes
3030  * @param[out] error
3031  *   Pointer to error structure.
3032  *
3033  * @return
3034  *   0 on success, a negative errno value otherwise and rte_errno is set.
3035  */
3036 static int
3037 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3038                              uint64_t action_flags,
3039                              const struct rte_flow_attr *attr,
3040                              struct rte_flow_error *error)
3041 {
3042         struct mlx5_priv *priv = dev->data->dev_private;
3043         struct mlx5_dev_config *config = &priv->config;
3044         int ret;
3045
3046         /* Fall back if no extended metadata register support. */
3047         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3048                 return mlx5_flow_validate_action_flag(action_flags, attr,
3049                                                       error);
3050         /* Extensive metadata mode requires registers. */
3051         if (!mlx5_flow_ext_mreg_supported(dev))
3052                 return rte_flow_error_set(error, ENOTSUP,
3053                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3054                                           "no metadata registers "
3055                                           "to support flag action");
3056         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3057                 return rte_flow_error_set(error, ENOTSUP,
3058                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3059                                           "extended metadata register"
3060                                           " isn't available");
3061         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3062         if (ret < 0)
3063                 return ret;
3064         MLX5_ASSERT(ret > 0);
3065         if (action_flags & MLX5_FLOW_ACTION_MARK)
3066                 return rte_flow_error_set(error, EINVAL,
3067                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3068                                           "can't mark and flag in same flow");
3069         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3070                 return rte_flow_error_set(error, EINVAL,
3071                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3072                                           "can't have 2 flag"
3073                                           " actions in same flow");
3074         return 0;
3075 }
3076
3077 /**
3078  * Validate MARK action.
3079  *
3080  * @param[in] dev
3081  *   Pointer to the rte_eth_dev structure.
3082  * @param[in] action
3083  *   Pointer to action.
3084  * @param[in] action_flags
3085  *   Holds the actions detected until now.
3086  * @param[in] attr
3087  *   Pointer to flow attributes
3088  * @param[out] error
3089  *   Pointer to error structure.
3090  *
3091  * @return
3092  *   0 on success, a negative errno value otherwise and rte_errno is set.
3093  */
3094 static int
3095 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3096                              const struct rte_flow_action *action,
3097                              uint64_t action_flags,
3098                              const struct rte_flow_attr *attr,
3099                              struct rte_flow_error *error)
3100 {
3101         struct mlx5_priv *priv = dev->data->dev_private;
3102         struct mlx5_dev_config *config = &priv->config;
3103         const struct rte_flow_action_mark *mark = action->conf;
3104         int ret;
3105
3106         if (is_tunnel_offload_active(dev))
3107                 return rte_flow_error_set(error, ENOTSUP,
3108                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3109                                           "no mark action "
3110                                           "if tunnel offload active");
3111         /* Fall back if no extended metadata register support. */
3112         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3113                 return mlx5_flow_validate_action_mark(action, action_flags,
3114                                                       attr, error);
3115         /* Extensive metadata mode requires registers. */
3116         if (!mlx5_flow_ext_mreg_supported(dev))
3117                 return rte_flow_error_set(error, ENOTSUP,
3118                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3119                                           "no metadata registers "
3120                                           "to support mark action");
3121         if (!priv->sh->dv_mark_mask)
3122                 return rte_flow_error_set(error, ENOTSUP,
3123                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3124                                           "extended metadata register"
3125                                           " isn't available");
3126         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3127         if (ret < 0)
3128                 return ret;
3129         MLX5_ASSERT(ret > 0);
3130         if (!mark)
3131                 return rte_flow_error_set(error, EINVAL,
3132                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3133                                           "configuration cannot be null");
3134         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3135                 return rte_flow_error_set(error, EINVAL,
3136                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3137                                           &mark->id,
3138                                           "mark id exceeds the limit");
3139         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3140                 return rte_flow_error_set(error, EINVAL,
3141                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3142                                           "can't flag and mark in same flow");
3143         if (action_flags & MLX5_FLOW_ACTION_MARK)
3144                 return rte_flow_error_set(error, EINVAL,
3145                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3146                                           "can't have 2 mark actions in same"
3147                                           " flow");
3148         return 0;
3149 }
3150
3151 /**
3152  * Validate SET_META action.
3153  *
3154  * @param[in] dev
3155  *   Pointer to the rte_eth_dev structure.
3156  * @param[in] action
3157  *   Pointer to the action structure.
3158  * @param[in] action_flags
3159  *   Holds the actions detected until now.
3160  * @param[in] attr
3161  *   Pointer to flow attributes
3162  * @param[out] error
3163  *   Pointer to error structure.
3164  *
3165  * @return
3166  *   0 on success, a negative errno value otherwise and rte_errno is set.
3167  */
3168 static int
3169 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3170                                  const struct rte_flow_action *action,
3171                                  uint64_t action_flags __rte_unused,
3172                                  const struct rte_flow_attr *attr,
3173                                  struct rte_flow_error *error)
3174 {
3175         struct mlx5_priv *priv = dev->data->dev_private;
3176         struct mlx5_dev_config *config = &priv->config;
3177         const struct rte_flow_action_set_meta *conf;
3178         uint32_t nic_mask = UINT32_MAX;
3179         int reg;
3180
3181         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
3182             !mlx5_flow_ext_mreg_supported(dev))
3183                 return rte_flow_error_set(error, ENOTSUP,
3184                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3185                                           "extended metadata register"
3186                                           " isn't supported");
3187         reg = flow_dv_get_metadata_reg(dev, attr, error);
3188         if (reg < 0)
3189                 return reg;
3190         if (reg == REG_NON)
3191                 return rte_flow_error_set(error, ENOTSUP,
3192                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3193                                           "unavailable extended metadata register");
3194         if (reg != REG_A && reg != REG_B) {
3195                 struct mlx5_priv *priv = dev->data->dev_private;
3196
3197                 nic_mask = priv->sh->dv_meta_mask;
3198         }
3199         if (!(action->conf))
3200                 return rte_flow_error_set(error, EINVAL,
3201                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3202                                           "configuration cannot be null");
3203         conf = (const struct rte_flow_action_set_meta *)action->conf;
3204         if (!conf->mask)
3205                 return rte_flow_error_set(error, EINVAL,
3206                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3207                                           "zero mask doesn't have any effect");
3208         if (conf->mask & ~nic_mask)
3209                 return rte_flow_error_set(error, EINVAL,
3210                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3211                                           "meta data must be within reg C0");
3212         return 0;
3213 }
3214
3215 /**
3216  * Validate SET_TAG action.
3217  *
3218  * @param[in] dev
3219  *   Pointer to the rte_eth_dev structure.
3220  * @param[in] action
3221  *   Pointer to the action structure.
3222  * @param[in] action_flags
3223  *   Holds the actions detected until now.
3224  * @param[in] attr
3225  *   Pointer to flow attributes
3226  * @param[out] error
3227  *   Pointer to error structure.
3228  *
3229  * @return
3230  *   0 on success, a negative errno value otherwise and rte_errno is set.
3231  */
3232 static int
3233 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3234                                 const struct rte_flow_action *action,
3235                                 uint64_t action_flags,
3236                                 const struct rte_flow_attr *attr,
3237                                 struct rte_flow_error *error)
3238 {
3239         const struct rte_flow_action_set_tag *conf;
3240         const uint64_t terminal_action_flags =
3241                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3242                 MLX5_FLOW_ACTION_RSS;
3243         int ret;
3244
3245         if (!mlx5_flow_ext_mreg_supported(dev))
3246                 return rte_flow_error_set(error, ENOTSUP,
3247                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3248                                           "extensive metadata register"
3249                                           " isn't supported");
3250         if (!(action->conf))
3251                 return rte_flow_error_set(error, EINVAL,
3252                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3253                                           "configuration cannot be null");
3254         conf = (const struct rte_flow_action_set_tag *)action->conf;
3255         if (!conf->mask)
3256                 return rte_flow_error_set(error, EINVAL,
3257                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3258                                           "zero mask doesn't have any effect");
3259         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3260         if (ret < 0)
3261                 return ret;
3262         if (!attr->transfer && attr->ingress &&
3263             (action_flags & terminal_action_flags))
3264                 return rte_flow_error_set(error, EINVAL,
3265                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3266                                           "set_tag has no effect"
3267                                           " with terminal actions");
3268         return 0;
3269 }
3270
3271 /**
3272  * Validate count action.
3273  *
3274  * @param[in] dev
3275  *   Pointer to rte_eth_dev structure.
3276  * @param[in] shared
3277  *   Indicator if action is shared.
3278  * @param[in] action_flags
3279  *   Holds the actions detected until now.
3280  * @param[out] error
3281  *   Pointer to error structure.
3282  *
3283  * @return
3284  *   0 on success, a negative errno value otherwise and rte_errno is set.
3285  */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
			      uint64_t action_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Counter offload relies on DevX; without it, fail as unsupported. */
	if (!priv->sh->devx)
		goto notsup_err;
	/* Only a single count action is allowed per flow. */
	if (action_flags & MLX5_FLOW_ACTION_COUNT)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "duplicate count actions set");
	/*
	 * A shared counter combined with an age action is only possible
	 * when aging is implemented via ASO flow-hit (flow_hit_aso_en).
	 */
	if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
	    !priv->sh->flow_hit_aso_en)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "old age and shared count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
	/*
	 * Without DevX counter support at build time, execution falls
	 * through to the unsupported-error path below.
	 */
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
3314
3315 /**
3316  * Validate the L2 encap action.
3317  *
3318  * @param[in] dev
3319  *   Pointer to the rte_eth_dev structure.
3320  * @param[in] action_flags
3321  *   Holds the actions detected until now.
3322  * @param[in] action
3323  *   Pointer to the action structure.
3324  * @param[in] attr
3325  *   Pointer to flow attributes.
3326  * @param[out] error
3327  *   Pointer to error structure.
3328  *
3329  * @return
3330  *   0 on success, a negative errno value otherwise and rte_errno is set.
3331  */
3332 static int
3333 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3334                                  uint64_t action_flags,
3335                                  const struct rte_flow_action *action,
3336                                  const struct rte_flow_attr *attr,
3337                                  struct rte_flow_error *error)
3338 {
3339         const struct mlx5_priv *priv = dev->data->dev_private;
3340
3341         if (!(action->conf))
3342                 return rte_flow_error_set(error, EINVAL,
3343                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3344                                           "configuration cannot be null");
3345         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3346                 return rte_flow_error_set(error, EINVAL,
3347                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3348                                           "can only have a single encap action "
3349                                           "in a flow");
3350         if (!attr->transfer && priv->representor)
3351                 return rte_flow_error_set(error, ENOTSUP,
3352                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3353                                           "encap action for VF representor "
3354                                           "not supported on NIC table");
3355         return 0;
3356 }
3357
3358 /**
3359  * Validate a decap action.
3360  *
3361  * @param[in] dev
3362  *   Pointer to the rte_eth_dev structure.
3363  * @param[in] action_flags
3364  *   Holds the actions detected until now.
3365  * @param[in] action
3366  *   Pointer to the action structure.
3367  * @param[in] item_flags
3368  *   Holds the items detected.
3369  * @param[in] attr
3370  *   Pointer to flow attributes
3371  * @param[out] error
3372  *   Pointer to error structure.
3373  *
3374  * @return
3375  *   0 on success, a negative errno value otherwise and rte_errno is set.
3376  */
3377 static int
3378 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3379                               uint64_t action_flags,
3380                               const struct rte_flow_action *action,
3381                               const uint64_t item_flags,
3382                               const struct rte_flow_attr *attr,
3383                               struct rte_flow_error *error)
3384 {
3385         const struct mlx5_priv *priv = dev->data->dev_private;
3386
3387         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3388             !priv->config.decap_en)
3389                 return rte_flow_error_set(error, ENOTSUP,
3390                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3391                                           "decap is not enabled");
3392         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3393                 return rte_flow_error_set(error, ENOTSUP,
3394                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3395                                           action_flags &
3396                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3397                                           "have a single decap action" : "decap "
3398                                           "after encap is not supported");
3399         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3400                 return rte_flow_error_set(error, EINVAL,
3401                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3402                                           "can't have decap action after"
3403                                           " modify action");
3404         if (attr->egress)
3405                 return rte_flow_error_set(error, ENOTSUP,
3406                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3407                                           NULL,
3408                                           "decap action not supported for "
3409                                           "egress");
3410         if (!attr->transfer && priv->representor)
3411                 return rte_flow_error_set(error, ENOTSUP,
3412                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3413                                           "decap action for VF representor "
3414                                           "not supported on NIC table");
3415         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3416             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3417                 return rte_flow_error_set(error, ENOTSUP,
3418                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3419                                 "VXLAN item should be present for VXLAN decap");
3420         return 0;
3421 }
3422
/* Shared zero-length raw decap action: no data pointer, size 0. */
const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3424
3425 /**
3426  * Validate the raw encap and decap actions.
3427  *
3428  * @param[in] dev
3429  *   Pointer to the rte_eth_dev structure.
3430  * @param[in] decap
3431  *   Pointer to the decap action.
3432  * @param[in] encap
3433  *   Pointer to the encap action.
3434  * @param[in] attr
3435  *   Pointer to flow attributes
3436  * @param[in/out] action_flags
3437  *   Holds the actions detected until now.
3438  * @param[out] actions_n
3439  *   pointer to the number of actions counter.
3440  * @param[in] action
3441  *   Pointer to the action structure.
3442  * @param[in] item_flags
3443  *   Holds the items detected.
3444  * @param[out] error
3445  *   Pointer to error structure.
3446  *
3447  * @return
3448  *   0 on success, a negative errno value otherwise and rte_errno is set.
3449  */
static int
flow_dv_validate_action_raw_encap_decap
	(struct rte_eth_dev *dev,
	 const struct rte_flow_action_raw_decap *decap,
	 const struct rte_flow_action_raw_encap *encap,
	 const struct rte_flow_attr *attr, uint64_t *action_flags,
	 int *actions_n, const struct rte_flow_action *action,
	 uint64_t item_flags, struct rte_flow_error *error)
{
	const struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	/* A raw encap action must carry a non-empty reformat buffer. */
	if (encap && (!encap->size || !encap->data))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "raw encap data cannot be empty");
	if (decap && encap) {
		/*
		 * Both actions present: classify by comparing each buffer
		 * size against the L2/L3 decision threshold.  When one side
		 * is only the small L2 part of an L3 reformat, drop it by
		 * NULLing the local pointer so it is not counted below.
		 */
		if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
		    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 encap. */
			decap = NULL;
		else if (encap->size <=
			   MLX5_ENCAPSULATION_DECISION_SIZE &&
			   decap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 decap. */
			encap = NULL;
		else if (encap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE &&
			   decap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE)
			/* 2 L2 actions: encap and decap. */
			;
		else
			return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "unsupported too small "
				"raw decap and too small raw "
				"encap combination");
	}
	if (decap) {
		/* Reuse the generic decap validation, then account for it. */
		ret = flow_dv_validate_action_decap(dev, *action_flags, action,
						    item_flags, attr, error);
		if (ret < 0)
			return ret;
		*action_flags |= MLX5_FLOW_ACTION_DECAP;
		++(*actions_n);
	}
	if (encap) {
		if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "small raw encap size");
		if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "more than one encap action");
		/* Encap on a VF representor NIC (non-transfer) table is
		 * rejected here; only transfer rules may encap.
		 */
		if (!attr->transfer && priv->representor)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "encap action for VF representor "
					 "not supported on NIC table");
		*action_flags |= MLX5_FLOW_ACTION_ENCAP;
		++(*actions_n);
	}
	return 0;
}
3521
3522 /*
3523  * Validate the ASO CT action.
3524  *
3525  * @param[in] dev
3526  *   Pointer to the rte_eth_dev structure.
3527  * @param[in] action_flags
3528  *   Holds the actions detected until now.
3529  * @param[in] item_flags
3530  *   The items found in this flow rule.
3531  * @param[in] attr
3532  *   Pointer to flow attributes.
3533  * @param[out] error
3534  *   Pointer to error structure.
3535  *
3536  * @return
3537  *   0 on success, a negative errno value otherwise and rte_errno is set.
3538  */
3539 static int
3540 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3541                                uint64_t action_flags,
3542                                uint64_t item_flags,
3543                                const struct rte_flow_attr *attr,
3544                                struct rte_flow_error *error)
3545 {
3546         RTE_SET_USED(dev);
3547
3548         if (attr->group == 0 && !attr->transfer)
3549                 return rte_flow_error_set(error, ENOTSUP,
3550                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3551                                           NULL,
3552                                           "Only support non-root table");
3553         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3554                 return rte_flow_error_set(error, ENOTSUP,
3555                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3556                                           "CT cannot follow a fate action");
3557         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3558             (action_flags & MLX5_FLOW_ACTION_AGE))
3559                 return rte_flow_error_set(error, EINVAL,
3560                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3561                                           "Only one ASO action is supported");
3562         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3563                 return rte_flow_error_set(error, EINVAL,
3564                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3565                                           "Encap cannot exist before CT");
3566         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3567                 return rte_flow_error_set(error, EINVAL,
3568                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3569                                           "Not a outer TCP packet");
3570         return 0;
3571 }
3572
3573 int
3574 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3575                              struct mlx5_list_entry *entry, void *cb_ctx)
3576 {
3577         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3578         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3579         struct mlx5_flow_dv_encap_decap_resource *resource;
3580
3581         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3582                                 entry);
3583         if (resource->reformat_type == ctx_resource->reformat_type &&
3584             resource->ft_type == ctx_resource->ft_type &&
3585             resource->flags == ctx_resource->flags &&
3586             resource->size == ctx_resource->size &&
3587             !memcmp((const void *)resource->buf,
3588                     (const void *)ctx_resource->buf,
3589                     resource->size))
3590                 return 0;
3591         return -1;
3592 }
3593
3594 struct mlx5_list_entry *
3595 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3596 {
3597         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3598         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3599         struct mlx5dv_dr_domain *domain;
3600         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3601         struct mlx5_flow_dv_encap_decap_resource *resource;
3602         uint32_t idx;
3603         int ret;
3604
3605         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3606                 domain = sh->fdb_domain;
3607         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3608                 domain = sh->rx_domain;
3609         else
3610                 domain = sh->tx_domain;
3611         /* Register new encap/decap resource. */
3612         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3613         if (!resource) {
3614                 rte_flow_error_set(ctx->error, ENOMEM,
3615                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3616                                    "cannot allocate resource memory");
3617                 return NULL;
3618         }
3619         *resource = *ctx_resource;
3620         resource->idx = idx;
3621         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
3622                                                               domain, resource,
3623                                                              &resource->action);
3624         if (ret) {
3625                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3626                 rte_flow_error_set(ctx->error, ENOMEM,
3627                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3628                                    NULL, "cannot create action");
3629                 return NULL;
3630         }
3631
3632         return &resource->entry;
3633 }
3634
3635 struct mlx5_list_entry *
3636 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3637                              void *cb_ctx)
3638 {
3639         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3640         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3641         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3642         uint32_t idx;
3643
3644         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3645                                            &idx);
3646         if (!cache_resource) {
3647                 rte_flow_error_set(ctx->error, ENOMEM,
3648                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3649                                    "cannot allocate resource memory");
3650                 return NULL;
3651         }
3652         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3653         cache_resource->idx = idx;
3654         return &cache_resource->entry;
3655 }
3656
3657 void
3658 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3659 {
3660         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3661         struct mlx5_flow_dv_encap_decap_resource *res =
3662                                        container_of(entry, typeof(*res), entry);
3663
3664         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3665 }
3666
3667 /**
3668  * Find existing encap/decap resource or create and register a new one.
3669  *
3670  * @param[in, out] dev
3671  *   Pointer to rte_eth_dev structure.
3672  * @param[in, out] resource
3673  *   Pointer to encap/decap resource.
3674  * @parm[in, out] dev_flow
3675  *   Pointer to the dev_flow.
3676  * @param[out] error
3677  *   pointer to error structure.
3678  *
3679  * @return
3680  *   0 on success otherwise -errno and errno is set.
3681  */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_list_entry *entry;
	/* Pack the sharing-relevant attributes into one 32-bit hash key. */
	union {
		struct {
			uint32_t ft_type:8;
			uint32_t refmt_type:8;
			/*
			 * Header reformat actions can be shared between
			 * non-root tables. One bit to indicate non-root
			 * table or not.
			 */
			uint32_t is_root:1;
			uint32_t reserve:15;
		};
		uint32_t v32;
	} encap_decap_key = {
		{
			.ft_type = resource->ft_type,
			.refmt_type = resource->reformat_type,
			/* NOTE(review): despite the name, this is set to 1 for
			 * non-root groups (group != 0) — confirm intent.
			 */
			.is_root = !!dev_flow->dv.group,
			.reserve = 0,
		}
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = resource,
	};
	struct mlx5_hlist *encaps_decaps;
	uint64_t key64;

	/* Lazily create the shared encap/decap hash list on first use. */
	encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
				"encaps_decaps",
				MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
				true, true, sh,
				flow_dv_encap_decap_create_cb,
				flow_dv_encap_decap_match_cb,
				flow_dv_encap_decap_remove_cb,
				flow_dv_encap_decap_clone_cb,
				flow_dv_encap_decap_clone_free_cb);
	if (unlikely(!encaps_decaps))
		return -rte_errno;
	/* flags bit 0 marks a root-table (group 0) resource. */
	resource->flags = dev_flow->dv.group ? 0 : 1;
	/* Hash the packed key, then fold in the reformat buffer if present. */
	key64 =  __rte_raw_cksum(&encap_decap_key.v32,
				 sizeof(encap_decap_key.v32), 0);
	if (resource->reformat_type !=
	    MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
	    resource->size)
		key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
	entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
	if (!entry)
		return -rte_errno;
	/* Point the dev_flow at the (possibly pre-existing) resource. */
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->dv.encap_decap = resource;
	dev_flow->handle->dvh.rix_encap_decap = resource->idx;
	return 0;
}
3746
3747 /**
3748  * Find existing table jump resource or create and register a new one.
3749  *
3750  * @param[in, out] dev
3751  *   Pointer to rte_eth_dev structure.
3752  * @param[in, out] tbl
3753  *   Pointer to flow table resource.
3754  * @parm[in, out] dev_flow
3755  *   Pointer to the dev_flow.
3756  * @param[out] error
3757  *   pointer to error structure.
3758  *
3759  * @return
3760  *   0 on success otherwise -errno and errno is set.
3761  */
3762 static int
3763 flow_dv_jump_tbl_resource_register
3764                         (struct rte_eth_dev *dev __rte_unused,
3765                          struct mlx5_flow_tbl_resource *tbl,
3766                          struct mlx5_flow *dev_flow,
3767                          struct rte_flow_error *error __rte_unused)
3768 {
3769         struct mlx5_flow_tbl_data_entry *tbl_data =
3770                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3771
3772         MLX5_ASSERT(tbl);
3773         MLX5_ASSERT(tbl_data->jump.action);
3774         dev_flow->handle->rix_jump = tbl_data->idx;
3775         dev_flow->dv.jump = &tbl_data->jump;
3776         return 0;
3777 }
3778
3779 int
3780 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3781                          struct mlx5_list_entry *entry, void *cb_ctx)
3782 {
3783         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3784         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3785         struct mlx5_flow_dv_port_id_action_resource *res =
3786                                        container_of(entry, typeof(*res), entry);
3787
3788         return ref->port_id != res->port_id;
3789 }
3790
3791 struct mlx5_list_entry *
3792 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3793 {
3794         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3795         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3796         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3797         struct mlx5_flow_dv_port_id_action_resource *resource;
3798         uint32_t idx;
3799         int ret;
3800
3801         /* Register new port id action resource. */
3802         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3803         if (!resource) {
3804                 rte_flow_error_set(ctx->error, ENOMEM,
3805                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3806                                    "cannot allocate port_id action memory");
3807                 return NULL;
3808         }
3809         *resource = *ref;
3810         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3811                                                         ref->port_id,
3812                                                         &resource->action);
3813         if (ret) {
3814                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3815                 rte_flow_error_set(ctx->error, ENOMEM,
3816                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3817                                    "cannot create action");
3818                 return NULL;
3819         }
3820         resource->idx = idx;
3821         return &resource->entry;
3822 }
3823
3824 struct mlx5_list_entry *
3825 flow_dv_port_id_clone_cb(void *tool_ctx,
3826                          struct mlx5_list_entry *entry __rte_unused,
3827                          void *cb_ctx)
3828 {
3829         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3830         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3831         struct mlx5_flow_dv_port_id_action_resource *resource;
3832         uint32_t idx;
3833
3834         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3835         if (!resource) {
3836                 rte_flow_error_set(ctx->error, ENOMEM,
3837                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3838                                    "cannot allocate port_id action memory");
3839                 return NULL;
3840         }
3841         memcpy(resource, entry, sizeof(*resource));
3842         resource->idx = idx;
3843         return &resource->entry;
3844 }
3845
3846 void
3847 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3848 {
3849         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3850         struct mlx5_flow_dv_port_id_action_resource *resource =
3851                                   container_of(entry, typeof(*resource), entry);
3852
3853         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3854 }
3855
3856 /**
3857  * Find existing table port ID resource or create and register a new one.
3858  *
3859  * @param[in, out] dev
3860  *   Pointer to rte_eth_dev structure.
3861  * @param[in, out] ref
3862  *   Pointer to port ID action resource reference.
3863  * @parm[in, out] dev_flow
3864  *   Pointer to the dev_flow.
3865  * @param[out] error
3866  *   pointer to error structure.
3867  *
3868  * @return
3869  *   0 on success otherwise -errno and errno is set.
3870  */
3871 static int
3872 flow_dv_port_id_action_resource_register
3873                         (struct rte_eth_dev *dev,
3874                          struct mlx5_flow_dv_port_id_action_resource *ref,
3875                          struct mlx5_flow *dev_flow,
3876                          struct rte_flow_error *error)
3877 {
3878         struct mlx5_priv *priv = dev->data->dev_private;
3879         struct mlx5_list_entry *entry;
3880         struct mlx5_flow_dv_port_id_action_resource *resource;
3881         struct mlx5_flow_cb_ctx ctx = {
3882                 .error = error,
3883                 .data = ref,
3884         };
3885
3886         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3887         if (!entry)
3888                 return -rte_errno;
3889         resource = container_of(entry, typeof(*resource), entry);
3890         dev_flow->dv.port_id_action = resource;
3891         dev_flow->handle->rix_port_id_action = resource->idx;
3892         return 0;
3893 }
3894
3895 int
3896 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3897                            struct mlx5_list_entry *entry, void *cb_ctx)
3898 {
3899         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3900         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3901         struct mlx5_flow_dv_push_vlan_action_resource *res =
3902                                        container_of(entry, typeof(*res), entry);
3903
3904         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3905 }
3906
struct mlx5_list_entry *
flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
	struct mlx5_flow_dv_push_vlan_action_resource *resource;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx;
	int ret;

	/* Register new push_vlan action resource. */
	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
	if (!resource) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate push_vlan action memory");
		return NULL;
	}
	*resource = *ref;
	/* Select the DR domain matching the target flow table type. */
	if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
							&resource->action);
	if (ret) {
		/* Roll back the ipool allocation on failure. */
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create push vlan action");
		return NULL;
	}
	resource->idx = idx;
	return &resource->entry;
}
3945
3946 struct mlx5_list_entry *
3947 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3948                            struct mlx5_list_entry *entry __rte_unused,
3949                            void *cb_ctx)
3950 {
3951         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3952         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3953         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3954         uint32_t idx;
3955
3956         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3957         if (!resource) {
3958                 rte_flow_error_set(ctx->error, ENOMEM,
3959                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3960                                    "cannot allocate push_vlan action memory");
3961                 return NULL;
3962         }
3963         memcpy(resource, entry, sizeof(*resource));
3964         resource->idx = idx;
3965         return &resource->entry;
3966 }
3967
3968 void
3969 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3970 {
3971         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3972         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3973                                   container_of(entry, typeof(*resource), entry);
3974
3975         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3976 }
3977
3978 /**
3979  * Find existing push vlan resource or create and register a new one.
3980  *
3981  * @param [in, out] dev
3982  *   Pointer to rte_eth_dev structure.
 * @param[in, out] ref
 *   Pointer to push VLAN action resource reference.
3985  * @parm[in, out] dev_flow
3986  *   Pointer to the dev_flow.
3987  * @param[out] error
3988  *   pointer to error structure.
3989  *
3990  * @return
3991  *   0 on success otherwise -errno and errno is set.
3992  */
3993 static int
3994 flow_dv_push_vlan_action_resource_register
3995                        (struct rte_eth_dev *dev,
3996                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
3997                         struct mlx5_flow *dev_flow,
3998                         struct rte_flow_error *error)
3999 {
4000         struct mlx5_priv *priv = dev->data->dev_private;
4001         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4002         struct mlx5_list_entry *entry;
4003         struct mlx5_flow_cb_ctx ctx = {
4004                 .error = error,
4005                 .data = ref,
4006         };
4007
4008         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4009         if (!entry)
4010                 return -rte_errno;
4011         resource = container_of(entry, typeof(*resource), entry);
4012
4013         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4014         dev_flow->dv.push_vlan_res = resource;
4015         return 0;
4016 }
4017
4018 /**
4019  * Get the size of specific rte_flow_item_type hdr size
4020  *
4021  * @param[in] item_type
4022  *   Tested rte_flow_item_type.
4023  *
4024  * @return
4025  *   sizeof struct item_type, 0 if void or irrelevant.
4026  */
4027 static size_t
4028 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4029 {
4030         size_t retval;
4031
4032         switch (item_type) {
4033         case RTE_FLOW_ITEM_TYPE_ETH:
4034                 retval = sizeof(struct rte_ether_hdr);
4035                 break;
4036         case RTE_FLOW_ITEM_TYPE_VLAN:
4037                 retval = sizeof(struct rte_vlan_hdr);
4038                 break;
4039         case RTE_FLOW_ITEM_TYPE_IPV4:
4040                 retval = sizeof(struct rte_ipv4_hdr);
4041                 break;
4042         case RTE_FLOW_ITEM_TYPE_IPV6:
4043                 retval = sizeof(struct rte_ipv6_hdr);
4044                 break;
4045         case RTE_FLOW_ITEM_TYPE_UDP:
4046                 retval = sizeof(struct rte_udp_hdr);
4047                 break;
4048         case RTE_FLOW_ITEM_TYPE_TCP:
4049                 retval = sizeof(struct rte_tcp_hdr);
4050                 break;
4051         case RTE_FLOW_ITEM_TYPE_VXLAN:
4052         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4053                 retval = sizeof(struct rte_vxlan_hdr);
4054                 break;
4055         case RTE_FLOW_ITEM_TYPE_GRE:
4056         case RTE_FLOW_ITEM_TYPE_NVGRE:
4057                 retval = sizeof(struct rte_gre_hdr);
4058                 break;
4059         case RTE_FLOW_ITEM_TYPE_MPLS:
4060                 retval = sizeof(struct rte_mpls_hdr);
4061                 break;
4062         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4063         default:
4064                 retval = 0;
4065                 break;
4066         }
4067         return retval;
4068 }
4069
/*
 * Default header field values applied by flow_dv_convert_encap_data()
 * when the user-supplied item spec leaves the field zero.
 */
#define MLX5_ENCAP_IPV4_VERSION         0x40
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4077
4078 /**
4079  * Convert the encap action data from list of rte_flow_item to raw buffer
4080  *
4081  * @param[in] items
4082  *   Pointer to rte_flow_item objects list.
4083  * @param[out] buf
4084  *   Pointer to the output buffer.
4085  * @param[out] size
4086  *   Pointer to the output buffer size.
4087  * @param[out] error
4088  *   Pointer to the error structure.
4089  *
4090  * @return
4091  *   0 on success, a negative errno value otherwise and rte_errno is set.
4092  */
4093 static int
4094 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4095                            size_t *size, struct rte_flow_error *error)
4096 {
4097         struct rte_ether_hdr *eth = NULL;
4098         struct rte_vlan_hdr *vlan = NULL;
4099         struct rte_ipv4_hdr *ipv4 = NULL;
4100         struct rte_ipv6_hdr *ipv6 = NULL;
4101         struct rte_udp_hdr *udp = NULL;
4102         struct rte_vxlan_hdr *vxlan = NULL;
4103         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4104         struct rte_gre_hdr *gre = NULL;
4105         size_t len;
4106         size_t temp_size = 0;
4107
4108         if (!items)
4109                 return rte_flow_error_set(error, EINVAL,
4110                                           RTE_FLOW_ERROR_TYPE_ACTION,
4111                                           NULL, "invalid empty data");
4112         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4113                 len = flow_dv_get_item_hdr_len(items->type);
4114                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4115                         return rte_flow_error_set(error, EINVAL,
4116                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4117                                                   (void *)items->type,
4118                                                   "items total size is too big"
4119                                                   " for encap action");
4120                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4121                 switch (items->type) {
4122                 case RTE_FLOW_ITEM_TYPE_ETH:
4123                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4124                         break;
4125                 case RTE_FLOW_ITEM_TYPE_VLAN:
4126                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4127                         if (!eth)
4128                                 return rte_flow_error_set(error, EINVAL,
4129                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4130                                                 (void *)items->type,
4131                                                 "eth header not found");
4132                         if (!eth->ether_type)
4133                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4134                         break;
4135                 case RTE_FLOW_ITEM_TYPE_IPV4:
4136                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4137                         if (!vlan && !eth)
4138                                 return rte_flow_error_set(error, EINVAL,
4139                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4140                                                 (void *)items->type,
4141                                                 "neither eth nor vlan"
4142                                                 " header found");
4143                         if (vlan && !vlan->eth_proto)
4144                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4145                         else if (eth && !eth->ether_type)
4146                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4147                         if (!ipv4->version_ihl)
4148                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4149                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4150                         if (!ipv4->time_to_live)
4151                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4152                         break;
4153                 case RTE_FLOW_ITEM_TYPE_IPV6:
4154                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4155                         if (!vlan && !eth)
4156                                 return rte_flow_error_set(error, EINVAL,
4157                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4158                                                 (void *)items->type,
4159                                                 "neither eth nor vlan"
4160                                                 " header found");
4161                         if (vlan && !vlan->eth_proto)
4162                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4163                         else if (eth && !eth->ether_type)
4164                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4165                         if (!ipv6->vtc_flow)
4166                                 ipv6->vtc_flow =
4167                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4168                         if (!ipv6->hop_limits)
4169                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4170                         break;
4171                 case RTE_FLOW_ITEM_TYPE_UDP:
4172                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4173                         if (!ipv4 && !ipv6)
4174                                 return rte_flow_error_set(error, EINVAL,
4175                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4176                                                 (void *)items->type,
4177                                                 "ip header not found");
4178                         if (ipv4 && !ipv4->next_proto_id)
4179                                 ipv4->next_proto_id = IPPROTO_UDP;
4180                         else if (ipv6 && !ipv6->proto)
4181                                 ipv6->proto = IPPROTO_UDP;
4182                         break;
4183                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4184                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4185                         if (!udp)
4186                                 return rte_flow_error_set(error, EINVAL,
4187                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4188                                                 (void *)items->type,
4189                                                 "udp header not found");
4190                         if (!udp->dst_port)
4191                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4192                         if (!vxlan->vx_flags)
4193                                 vxlan->vx_flags =
4194                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4195                         break;
4196                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4197                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4198                         if (!udp)
4199                                 return rte_flow_error_set(error, EINVAL,
4200                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4201                                                 (void *)items->type,
4202                                                 "udp header not found");
4203                         if (!vxlan_gpe->proto)
4204                                 return rte_flow_error_set(error, EINVAL,
4205                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4206                                                 (void *)items->type,
4207                                                 "next protocol not found");
4208                         if (!udp->dst_port)
4209                                 udp->dst_port =
4210                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4211                         if (!vxlan_gpe->vx_flags)
4212                                 vxlan_gpe->vx_flags =
4213                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4214                         break;
4215                 case RTE_FLOW_ITEM_TYPE_GRE:
4216                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4217                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4218                         if (!gre->proto)
4219                                 return rte_flow_error_set(error, EINVAL,
4220                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4221                                                 (void *)items->type,
4222                                                 "next protocol not found");
4223                         if (!ipv4 && !ipv6)
4224                                 return rte_flow_error_set(error, EINVAL,
4225                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4226                                                 (void *)items->type,
4227                                                 "ip header not found");
4228                         if (ipv4 && !ipv4->next_proto_id)
4229                                 ipv4->next_proto_id = IPPROTO_GRE;
4230                         else if (ipv6 && !ipv6->proto)
4231                                 ipv6->proto = IPPROTO_GRE;
4232                         break;
4233                 case RTE_FLOW_ITEM_TYPE_VOID:
4234                         break;
4235                 default:
4236                         return rte_flow_error_set(error, EINVAL,
4237                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4238                                                   (void *)items->type,
4239                                                   "unsupported item type");
4240                         break;
4241                 }
4242                 temp_size += len;
4243         }
4244         *size = temp_size;
4245         return 0;
4246 }
4247
4248 static int
4249 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4250 {
4251         struct rte_ether_hdr *eth = NULL;
4252         struct rte_vlan_hdr *vlan = NULL;
4253         struct rte_ipv6_hdr *ipv6 = NULL;
4254         struct rte_udp_hdr *udp = NULL;
4255         char *next_hdr;
4256         uint16_t proto;
4257
4258         eth = (struct rte_ether_hdr *)data;
4259         next_hdr = (char *)(eth + 1);
4260         proto = RTE_BE16(eth->ether_type);
4261
4262         /* VLAN skipping */
4263         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4264                 vlan = (struct rte_vlan_hdr *)next_hdr;
4265                 proto = RTE_BE16(vlan->eth_proto);
4266                 next_hdr += sizeof(struct rte_vlan_hdr);
4267         }
4268
4269         /* HW calculates IPv4 csum. no need to proceed */
4270         if (proto == RTE_ETHER_TYPE_IPV4)
4271                 return 0;
4272
4273         /* non IPv4/IPv6 header. not supported */
4274         if (proto != RTE_ETHER_TYPE_IPV6) {
4275                 return rte_flow_error_set(error, ENOTSUP,
4276                                           RTE_FLOW_ERROR_TYPE_ACTION,
4277                                           NULL, "Cannot offload non IPv4/IPv6");
4278         }
4279
4280         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4281
4282         /* ignore non UDP */
4283         if (ipv6->proto != IPPROTO_UDP)
4284                 return 0;
4285
4286         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4287         udp->dgram_cksum = 0;
4288
4289         return 0;
4290 }
4291
4292 /**
4293  * Convert L2 encap action to DV specification.
4294  *
4295  * @param[in] dev
4296  *   Pointer to rte_eth_dev structure.
4297  * @param[in] action
4298  *   Pointer to action structure.
4299  * @param[in, out] dev_flow
4300  *   Pointer to the mlx5_flow.
4301  * @param[in] transfer
4302  *   Mark if the flow is E-Switch flow.
4303  * @param[out] error
4304  *   Pointer to the error structure.
4305  *
4306  * @return
4307  *   0 on success, a negative errno value otherwise and rte_errno is set.
4308  */
4309 static int
4310 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4311                                const struct rte_flow_action *action,
4312                                struct mlx5_flow *dev_flow,
4313                                uint8_t transfer,
4314                                struct rte_flow_error *error)
4315 {
4316         const struct rte_flow_item *encap_data;
4317         const struct rte_flow_action_raw_encap *raw_encap_data;
4318         struct mlx5_flow_dv_encap_decap_resource res = {
4319                 .reformat_type =
4320                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4321                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4322                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4323         };
4324
4325         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4326                 raw_encap_data =
4327                         (const struct rte_flow_action_raw_encap *)action->conf;
4328                 res.size = raw_encap_data->size;
4329                 memcpy(res.buf, raw_encap_data->data, res.size);
4330         } else {
4331                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4332                         encap_data =
4333                                 ((const struct rte_flow_action_vxlan_encap *)
4334                                                 action->conf)->definition;
4335                 else
4336                         encap_data =
4337                                 ((const struct rte_flow_action_nvgre_encap *)
4338                                                 action->conf)->definition;
4339                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4340                                                &res.size, error))
4341                         return -rte_errno;
4342         }
4343         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4344                 return -rte_errno;
4345         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4346                 return rte_flow_error_set(error, EINVAL,
4347                                           RTE_FLOW_ERROR_TYPE_ACTION,
4348                                           NULL, "can't create L2 encap action");
4349         return 0;
4350 }
4351
4352 /**
4353  * Convert L2 decap action to DV specification.
4354  *
4355  * @param[in] dev
4356  *   Pointer to rte_eth_dev structure.
4357  * @param[in, out] dev_flow
4358  *   Pointer to the mlx5_flow.
4359  * @param[in] transfer
4360  *   Mark if the flow is E-Switch flow.
4361  * @param[out] error
4362  *   Pointer to the error structure.
4363  *
4364  * @return
4365  *   0 on success, a negative errno value otherwise and rte_errno is set.
4366  */
4367 static int
4368 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4369                                struct mlx5_flow *dev_flow,
4370                                uint8_t transfer,
4371                                struct rte_flow_error *error)
4372 {
4373         struct mlx5_flow_dv_encap_decap_resource res = {
4374                 .size = 0,
4375                 .reformat_type =
4376                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4377                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4378                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4379         };
4380
4381         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4382                 return rte_flow_error_set(error, EINVAL,
4383                                           RTE_FLOW_ERROR_TYPE_ACTION,
4384                                           NULL, "can't create L2 decap action");
4385         return 0;
4386 }
4387
4388 /**
4389  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4390  *
4391  * @param[in] dev
4392  *   Pointer to rte_eth_dev structure.
4393  * @param[in] action
4394  *   Pointer to action structure.
4395  * @param[in, out] dev_flow
4396  *   Pointer to the mlx5_flow.
4397  * @param[in] attr
4398  *   Pointer to the flow attributes.
4399  * @param[out] error
4400  *   Pointer to the error structure.
4401  *
4402  * @return
4403  *   0 on success, a negative errno value otherwise and rte_errno is set.
4404  */
4405 static int
4406 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4407                                 const struct rte_flow_action *action,
4408                                 struct mlx5_flow *dev_flow,
4409                                 const struct rte_flow_attr *attr,
4410                                 struct rte_flow_error *error)
4411 {
4412         const struct rte_flow_action_raw_encap *encap_data;
4413         struct mlx5_flow_dv_encap_decap_resource res;
4414
4415         memset(&res, 0, sizeof(res));
4416         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4417         res.size = encap_data->size;
4418         memcpy(res.buf, encap_data->data, res.size);
4419         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4420                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4421                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4422         if (attr->transfer)
4423                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4424         else
4425                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4426                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4427         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4428                 return rte_flow_error_set(error, EINVAL,
4429                                           RTE_FLOW_ERROR_TYPE_ACTION,
4430                                           NULL, "can't create encap action");
4431         return 0;
4432 }
4433
4434 /**
4435  * Create action push VLAN.
4436  *
4437  * @param[in] dev
4438  *   Pointer to rte_eth_dev structure.
4439  * @param[in] attr
4440  *   Pointer to the flow attributes.
4441  * @param[in] vlan
4442  *   Pointer to the vlan to push to the Ethernet header.
4443  * @param[in, out] dev_flow
4444  *   Pointer to the mlx5_flow.
4445  * @param[out] error
4446  *   Pointer to the error structure.
4447  *
4448  * @return
4449  *   0 on success, a negative errno value otherwise and rte_errno is set.
4450  */
4451 static int
4452 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4453                                 const struct rte_flow_attr *attr,
4454                                 const struct rte_vlan_hdr *vlan,
4455                                 struct mlx5_flow *dev_flow,
4456                                 struct rte_flow_error *error)
4457 {
4458         struct mlx5_flow_dv_push_vlan_action_resource res;
4459
4460         memset(&res, 0, sizeof(res));
4461         res.vlan_tag =
4462                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4463                                  vlan->vlan_tci);
4464         if (attr->transfer)
4465                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4466         else
4467                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4468                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4469         return flow_dv_push_vlan_action_resource_register
4470                                             (dev, &res, dev_flow, error);
4471 }
4472
4473 /**
4474  * Validate the modify-header actions.
4475  *
4476  * @param[in] action_flags
4477  *   Holds the actions detected until now.
4478  * @param[in] action
4479  *   Pointer to the modify action.
4480  * @param[out] error
4481  *   Pointer to error structure.
4482  *
4483  * @return
4484  *   0 on success, a negative errno value otherwise and rte_errno is set.
4485  */
4486 static int
4487 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4488                                    const struct rte_flow_action *action,
4489                                    struct rte_flow_error *error)
4490 {
4491         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4492                 return rte_flow_error_set(error, EINVAL,
4493                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4494                                           NULL, "action configuration not set");
4495         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4496                 return rte_flow_error_set(error, EINVAL,
4497                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4498                                           "can't have encap action before"
4499                                           " modify action");
4500         return 0;
4501 }
4502
4503 /**
4504  * Validate the modify-header MAC address actions.
4505  *
4506  * @param[in] action_flags
4507  *   Holds the actions detected until now.
4508  * @param[in] action
4509  *   Pointer to the modify action.
4510  * @param[in] item_flags
4511  *   Holds the items detected.
4512  * @param[out] error
4513  *   Pointer to error structure.
4514  *
4515  * @return
4516  *   0 on success, a negative errno value otherwise and rte_errno is set.
4517  */
4518 static int
4519 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4520                                    const struct rte_flow_action *action,
4521                                    const uint64_t item_flags,
4522                                    struct rte_flow_error *error)
4523 {
4524         int ret = 0;
4525
4526         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4527         if (!ret) {
4528                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4529                         return rte_flow_error_set(error, EINVAL,
4530                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4531                                                   NULL,
4532                                                   "no L2 item in pattern");
4533         }
4534         return ret;
4535 }
4536
4537 /**
4538  * Validate the modify-header IPv4 address actions.
4539  *
4540  * @param[in] action_flags
4541  *   Holds the actions detected until now.
4542  * @param[in] action
4543  *   Pointer to the modify action.
4544  * @param[in] item_flags
4545  *   Holds the items detected.
4546  * @param[out] error
4547  *   Pointer to error structure.
4548  *
4549  * @return
4550  *   0 on success, a negative errno value otherwise and rte_errno is set.
4551  */
4552 static int
4553 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4554                                     const struct rte_flow_action *action,
4555                                     const uint64_t item_flags,
4556                                     struct rte_flow_error *error)
4557 {
4558         int ret = 0;
4559         uint64_t layer;
4560
4561         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4562         if (!ret) {
4563                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4564                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4565                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4566                 if (!(item_flags & layer))
4567                         return rte_flow_error_set(error, EINVAL,
4568                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4569                                                   NULL,
4570                                                   "no ipv4 item in pattern");
4571         }
4572         return ret;
4573 }
4574
4575 /**
4576  * Validate the modify-header IPv6 address actions.
4577  *
4578  * @param[in] action_flags
4579  *   Holds the actions detected until now.
4580  * @param[in] action
4581  *   Pointer to the modify action.
4582  * @param[in] item_flags
4583  *   Holds the items detected.
4584  * @param[out] error
4585  *   Pointer to error structure.
4586  *
4587  * @return
4588  *   0 on success, a negative errno value otherwise and rte_errno is set.
4589  */
4590 static int
4591 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4592                                     const struct rte_flow_action *action,
4593                                     const uint64_t item_flags,
4594                                     struct rte_flow_error *error)
4595 {
4596         int ret = 0;
4597         uint64_t layer;
4598
4599         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4600         if (!ret) {
4601                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4602                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4603                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4604                 if (!(item_flags & layer))
4605                         return rte_flow_error_set(error, EINVAL,
4606                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4607                                                   NULL,
4608                                                   "no ipv6 item in pattern");
4609         }
4610         return ret;
4611 }
4612
4613 /**
4614  * Validate the modify-header TP actions.
4615  *
4616  * @param[in] action_flags
4617  *   Holds the actions detected until now.
4618  * @param[in] action
4619  *   Pointer to the modify action.
4620  * @param[in] item_flags
4621  *   Holds the items detected.
4622  * @param[out] error
4623  *   Pointer to error structure.
4624  *
4625  * @return
4626  *   0 on success, a negative errno value otherwise and rte_errno is set.
4627  */
4628 static int
4629 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4630                                   const struct rte_flow_action *action,
4631                                   const uint64_t item_flags,
4632                                   struct rte_flow_error *error)
4633 {
4634         int ret = 0;
4635         uint64_t layer;
4636
4637         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4638         if (!ret) {
4639                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4640                                  MLX5_FLOW_LAYER_INNER_L4 :
4641                                  MLX5_FLOW_LAYER_OUTER_L4;
4642                 if (!(item_flags & layer))
4643                         return rte_flow_error_set(error, EINVAL,
4644                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4645                                                   NULL, "no transport layer "
4646                                                   "in pattern");
4647         }
4648         return ret;
4649 }
4650
4651 /**
4652  * Validate the modify-header actions of increment/decrement
4653  * TCP Sequence-number.
4654  *
4655  * @param[in] action_flags
4656  *   Holds the actions detected until now.
4657  * @param[in] action
4658  *   Pointer to the modify action.
4659  * @param[in] item_flags
4660  *   Holds the items detected.
4661  * @param[out] error
4662  *   Pointer to error structure.
4663  *
4664  * @return
4665  *   0 on success, a negative errno value otherwise and rte_errno is set.
4666  */
4667 static int
4668 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4669                                        const struct rte_flow_action *action,
4670                                        const uint64_t item_flags,
4671                                        struct rte_flow_error *error)
4672 {
4673         int ret = 0;
4674         uint64_t layer;
4675
4676         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4677         if (!ret) {
4678                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4679                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4680                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4681                 if (!(item_flags & layer))
4682                         return rte_flow_error_set(error, EINVAL,
4683                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4684                                                   NULL, "no TCP item in"
4685                                                   " pattern");
4686                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4687                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4688                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4689                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4690                         return rte_flow_error_set(error, EINVAL,
4691                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4692                                                   NULL,
4693                                                   "cannot decrease and increase"
4694                                                   " TCP sequence number"
4695                                                   " at the same time");
4696         }
4697         return ret;
4698 }
4699
4700 /**
4701  * Validate the modify-header actions of increment/decrement
4702  * TCP Acknowledgment number.
4703  *
4704  * @param[in] action_flags
4705  *   Holds the actions detected until now.
4706  * @param[in] action
4707  *   Pointer to the modify action.
4708  * @param[in] item_flags
4709  *   Holds the items detected.
4710  * @param[out] error
4711  *   Pointer to error structure.
4712  *
4713  * @return
4714  *   0 on success, a negative errno value otherwise and rte_errno is set.
4715  */
4716 static int
4717 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4718                                        const struct rte_flow_action *action,
4719                                        const uint64_t item_flags,
4720                                        struct rte_flow_error *error)
4721 {
4722         int ret = 0;
4723         uint64_t layer;
4724
4725         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4726         if (!ret) {
4727                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4728                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4729                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4730                 if (!(item_flags & layer))
4731                         return rte_flow_error_set(error, EINVAL,
4732                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4733                                                   NULL, "no TCP item in"
4734                                                   " pattern");
4735                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4736                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4737                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4738                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4739                         return rte_flow_error_set(error, EINVAL,
4740                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4741                                                   NULL,
4742                                                   "cannot decrease and increase"
4743                                                   " TCP acknowledgment number"
4744                                                   " at the same time");
4745         }
4746         return ret;
4747 }
4748
4749 /**
4750  * Validate the modify-header TTL actions.
4751  *
4752  * @param[in] action_flags
4753  *   Holds the actions detected until now.
4754  * @param[in] action
4755  *   Pointer to the modify action.
4756  * @param[in] item_flags
4757  *   Holds the items detected.
4758  * @param[out] error
4759  *   Pointer to error structure.
4760  *
4761  * @return
4762  *   0 on success, a negative errno value otherwise and rte_errno is set.
4763  */
4764 static int
4765 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4766                                    const struct rte_flow_action *action,
4767                                    const uint64_t item_flags,
4768                                    struct rte_flow_error *error)
4769 {
4770         int ret = 0;
4771         uint64_t layer;
4772
4773         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4774         if (!ret) {
4775                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4776                                  MLX5_FLOW_LAYER_INNER_L3 :
4777                                  MLX5_FLOW_LAYER_OUTER_L3;
4778                 if (!(item_flags & layer))
4779                         return rte_flow_error_set(error, EINVAL,
4780                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4781                                                   NULL,
4782                                                   "no IP protocol in pattern");
4783         }
4784         return ret;
4785 }
4786
/**
 * Validate the generic modify field actions.
 *
 * Checks the requested bit width against both field widths, validates the
 * offsets/levels of source and destination, and rejects fields that the
 * hardware cannot rewrite. The order of the checks determines which error
 * is reported first and must not be changed.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Number of header fields to modify (0 or more) on success,
 *   a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
				   const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error)
{
	int ret = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_action_modify_field *action_modify_field =
		action->conf;
	/*
	 * Widths (in bits) of the destination and source fields.
	 * The source width query takes the destination width as a hint.
	 */
	uint32_t dst_width = mlx5_flow_item_field_width(dev,
				action_modify_field->dst.field,
				-1, attr, error);
	uint32_t src_width = mlx5_flow_item_field_width(dev,
				action_modify_field->src.field,
				dst_width, attr, error);

	/* Generic modify-header constraints (count limit, fate ordering). */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (ret)
		return ret;

	if (action_modify_field->width == 0)
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"no bits are requested to be modified");
	else if (action_modify_field->width > dst_width ||
		 action_modify_field->width > src_width)
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"cannot modify more bits than"
				" the width of a field");
	/* Destination offset must fit and be 32-bit (4-byte) aligned. */
	if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
	    action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
		if ((action_modify_field->dst.offset +
		     action_modify_field->width > dst_width) ||
		    (action_modify_field->dst.offset % 32))
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"destination offset is too big"
					" or not aligned to 4 bytes");
		/* Non-zero level selects inner headers; only TAG allows it. */
		if (action_modify_field->dst.level &&
		    action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"inner header fields modification"
					" is not supported");
	}
	/* Same constraints for the source side (when it is a real field). */
	if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
	    action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
		if (!attr->transfer && !attr->group)
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"modify field action is not"
					" supported for group 0");
		if ((action_modify_field->src.offset +
		     action_modify_field->width > src_width) ||
		    (action_modify_field->src.offset % 32))
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"source offset is too big"
					" or not aligned to 4 bytes");
		if (action_modify_field->src.level &&
		    action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"inner header fields modification"
					" is not supported");
	}
	/* Copying a field onto itself is meaningless. */
	if ((action_modify_field->dst.field ==
	     action_modify_field->src.field) &&
	    (action_modify_field->dst.level ==
	     action_modify_field->src.level))
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"source and destination fields"
				" cannot be the same");
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
	    action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
	    action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"mark, immediate value or a pointer to it"
				" cannot be used as a destination");
	/* Fields the device cannot rewrite at all. */
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_START)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modifications of an arbitrary"
				" place in a packet is not supported");
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modifications of the 802.1Q Tag"
				" Identifier is not supported");
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modifications of the VXLAN Network"
				" Identifier is not supported");
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modifications of the GENEVE Network"
				" Identifier is not supported");
	/* MARK/META need extended metadata register support. */
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
		if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
		    !mlx5_flow_ext_mreg_supported(dev))
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"cannot modify mark in legacy mode"
					" or without extensive registers");
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_META) {
		if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    !mlx5_flow_ext_mreg_supported(dev))
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"cannot modify meta without"
					" extensive registers support");
		/* A usable metadata register must actually be available. */
		ret = flow_dv_get_metadata_reg(dev, attr, error);
		if (ret < 0 || ret == REG_NON)
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"cannot modify meta without"
					" extensive registers available");
	}
	if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"add and sub operations"
				" are not supported");
	/* One modify-header command covers 32 bits; round up. */
	return (action_modify_field->width / 32) +
	       !!(action_modify_field->width % 32);
}
4944
/**
 * Validate jump action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] tunnel
 *   Tunnel offload context, NULL when the flow is not tunnel-offloaded.
 * @param[in] action
 *   Pointer to the jump action.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] external
 *   Action belongs to flow rule created by request external to PMD.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_jump(struct rte_eth_dev *dev,
			     const struct mlx5_flow_tunnel *tunnel,
			     const struct rte_flow_action *action,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attributes,
			     bool external, struct rte_flow_error *error)
{
	uint32_t target_group, table = 0;
	int ret = 0;
	struct flow_grp_info grp_info = {
		.external = !!external,
		.transfer = !!attributes->transfer,
		.fdb_def_rule = 1,
		.std_tbl_fix = 0
	};
	/* Jump is a fate action - it cannot coexist with another one. */
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!action->conf)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "action configuration not set");
	target_group =
		((const struct rte_flow_action_jump *)action->conf)->group;
	/* Translate the user-visible group number to a device table id. */
	ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
				       &grp_info, error);
	if (ret)
		return ret;
	/*
	 * Jumping to the current group would loop; tunnel set/match flows
	 * are exempt because their effective tables differ.
	 */
	if (attributes->group == target_group &&
	    !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
			      MLX5_FLOW_ACTION_TUNNEL_MATCH)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "target group must be other than"
					  " the current flow group");
	if (table == 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "root table shouldn't be destination");
	return 0;
}
5007
5008 /*
5009  * Validate action PORT_ID / REPRESENTED_PORT.
5010  *
5011  * @param[in] dev
5012  *   Pointer to rte_eth_dev structure.
5013  * @param[in] action_flags
5014  *   Bit-fields that holds the actions detected until now.
5015  * @param[in] action
5016  *   PORT_ID / REPRESENTED_PORT action structure.
5017  * @param[in] attr
5018  *   Attributes of flow that includes this action.
5019  * @param[out] error
5020  *   Pointer to error structure.
5021  *
5022  * @return
5023  *   0 on success, a negative errno value otherwise and rte_errno is set.
5024  */
5025 static int
5026 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5027                                 uint64_t action_flags,
5028                                 const struct rte_flow_action *action,
5029                                 const struct rte_flow_attr *attr,
5030                                 struct rte_flow_error *error)
5031 {
5032         const struct rte_flow_action_port_id *port_id;
5033         const struct rte_flow_action_ethdev *ethdev;
5034         struct mlx5_priv *act_priv;
5035         struct mlx5_priv *dev_priv;
5036         uint16_t port;
5037
5038         if (!attr->transfer)
5039                 return rte_flow_error_set(error, ENOTSUP,
5040                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5041                                           NULL,
5042                                           "port action is valid in transfer"
5043                                           " mode only");
5044         if (!action || !action->conf)
5045                 return rte_flow_error_set(error, ENOTSUP,
5046                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5047                                           NULL,
5048                                           "port action parameters must be"
5049                                           " specified");
5050         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5051                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5052                 return rte_flow_error_set(error, EINVAL,
5053                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5054                                           "can have only one fate actions in"
5055                                           " a flow");
5056         dev_priv = mlx5_dev_to_eswitch_info(dev);
5057         if (!dev_priv)
5058                 return rte_flow_error_set(error, rte_errno,
5059                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5060                                           NULL,
5061                                           "failed to obtain E-Switch info");
5062         switch (action->type) {
5063         case RTE_FLOW_ACTION_TYPE_PORT_ID:
5064                 port_id = action->conf;
5065                 port = port_id->original ? dev->data->port_id : port_id->id;
5066                 break;
5067         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5068                 ethdev = action->conf;
5069                 port = ethdev->port_id;
5070                 break;
5071         default:
5072                 MLX5_ASSERT(false);
5073                 return rte_flow_error_set
5074                                 (error, EINVAL,
5075                                  RTE_FLOW_ERROR_TYPE_ACTION, action,
5076                                  "unknown E-Switch action");
5077         }
5078         act_priv = mlx5_port_to_eswitch_info(port, false);
5079         if (!act_priv)
5080                 return rte_flow_error_set
5081                                 (error, rte_errno,
5082                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
5083                                  "failed to obtain E-Switch port id for port");
5084         if (act_priv->domain_id != dev_priv->domain_id)
5085                 return rte_flow_error_set
5086                                 (error, EINVAL,
5087                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5088                                  "port does not belong to"
5089                                  " E-Switch being configured");
5090         return 0;
5091 }
5092
5093 /**
5094  * Get the maximum number of modify header actions.
5095  *
5096  * @param dev
5097  *   Pointer to rte_eth_dev structure.
5098  * @param root
5099  *   Whether action is on root table.
5100  *
5101  * @return
5102  *   Max number of modify header actions device can support.
5103  */
5104 static inline unsigned int
5105 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5106                               bool root)
5107 {
5108         /*
5109          * There's no way to directly query the max capacity from FW.
5110          * The maximal value on root table should be assumed to be supported.
5111          */
5112         if (!root)
5113                 return MLX5_MAX_MODIFY_NUM;
5114         else
5115                 return MLX5_ROOT_TBL_MODIFY_NUM;
5116 }
5117
/**
 * Validate the meter action.
 *
 * Checks meter existence, domain compatibility between the flow
 * attributes and the meter/policy, and - for transfer flows with a
 * port_id fate or an RSS policy - additional cross-checks against the
 * policy contents.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[in] action
 *   Pointer to the meter action.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] port_id_item
 *   Pointer to item indicating port id.
 * @param[out] def_policy
 *   Set to true when the meter uses the default policy, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
				uint64_t action_flags, uint64_t item_flags,
				const struct rte_flow_action *action,
				const struct rte_flow_attr *attr,
				const struct rte_flow_item *port_id_item,
				bool *def_policy,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_meter *am = action->conf;
	struct mlx5_flow_meter_info *fm;
	struct mlx5_flow_meter_policy *mtr_policy;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;

	if (!am)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter action conf is NULL");

	/* Only one meter per flow; meter after jump is not supported. */
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter chaining not support");
	if (action_flags & MLX5_FLOW_ACTION_JUMP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter with jump not support");
	if (!priv->mtr_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "meter action not supported");
	fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
	if (!fm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Meter not found");
	/* aso meter can always be shared by different domains */
	if (fm->ref_cnt && !priv->sh->meter_aso_en &&
	    !(fm->transfer == attr->transfer ||
	      (!fm->ingress && !attr->ingress && attr->egress) ||
	      (!fm->egress && !attr->egress && attr->ingress)))
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Flow attributes domain are either invalid "
			"or have a domain conflict with current "
			"meter attributes");
	if (fm->def_policy) {
		/* The default policy must exist for the flow's domain. */
		if (!((attr->transfer &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
			(attr->egress &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
			(attr->ingress &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes domain "
					  "have a conflict with current "
					  "meter domain attributes");
		*def_policy = true;
	} else {
		mtr_policy = mlx5_flow_meter_policy_find(dev,
						fm->policy_id, NULL);
		if (!mtr_policy)
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Invalid policy id for meter ");
		/* The policy must cover the flow's domain. */
		if (!((attr->transfer && mtr_policy->transfer) ||
			(attr->egress && mtr_policy->egress) ||
			(attr->ingress && mtr_policy->ingress)))
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes domain "
					  "have a conflict with current "
					  "meter domain attributes");
		if (attr->transfer && mtr_policy->dev) {
			/**
			 * When policy has fate action of port_id,
			 * the flow should have the same src port as policy.
			 */
			struct mlx5_priv *policy_port_priv =
					mtr_policy->dev->data->dev_private;
			int32_t flow_src_port = priv->representor_id;

			if (port_id_item) {
				/* Source port given explicitly by the item. */
				const struct rte_flow_item_port_id *spec =
							port_id_item->spec;
				struct mlx5_priv *port_priv =
					mlx5_port_to_eswitch_info(spec->id,
								  false);
				if (!port_priv)
					return rte_flow_error_set(error,
						rte_errno,
						RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
						spec,
						"Failed to get port info.");
				flow_src_port = port_priv->representor_id;
			}
			if (flow_src_port != policy_port_priv->representor_id)
				return rte_flow_error_set(error,
						rte_errno,
						RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
						NULL,
						"Flow and meter policy "
						"have different src port.");
		} else if (mtr_policy->is_rss) {
			struct mlx5_flow_meter_policy *fp;
			struct mlx5_meter_policy_action_container *acg;
			struct mlx5_meter_policy_action_container *acy;
			const struct rte_flow_action *rss_act;
			int ret;

			/*
			 * For hierarchical policies, validate the RSS action
			 * of the final (leaf) policy in the chain.
			 */
			fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
								mtr_policy);
			if (fp == NULL)
				return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
						  "Unable to get the final "
						  "policy in the hierarchy");
			acg = &fp->act_cnt[RTE_COLOR_GREEN];
			acy = &fp->act_cnt[RTE_COLOR_YELLOW];
			MLX5_ASSERT(acg->fate_action ==
				    MLX5_FLOW_FATE_SHARED_RSS ||
				    acy->fate_action ==
				    MLX5_FLOW_FATE_SHARED_RSS);
			/* Prefer the green-color RSS action when present. */
			if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
				rss_act = acg->rss;
			else
				rss_act = acy->rss;
			ret = mlx5_flow_validate_action_rss(rss_act,
					action_flags, dev, attr,
					item_flags, error);
			if (ret)
				return ret;
		}
		*def_policy = false;
	}
	return 0;
}
5279
5280 /**
5281  * Validate the age action.
5282  *
5283  * @param[in] action_flags
5284  *   Holds the actions detected until now.
5285  * @param[in] action
5286  *   Pointer to the age action.
5287  * @param[in] dev
5288  *   Pointer to the Ethernet device structure.
5289  * @param[out] error
5290  *   Pointer to error structure.
5291  *
5292  * @return
5293  *   0 on success, a negative errno value otherwise and rte_errno is set.
5294  */
5295 static int
5296 flow_dv_validate_action_age(uint64_t action_flags,
5297                             const struct rte_flow_action *action,
5298                             struct rte_eth_dev *dev,
5299                             struct rte_flow_error *error)
5300 {
5301         struct mlx5_priv *priv = dev->data->dev_private;
5302         const struct rte_flow_action_age *age = action->conf;
5303
5304         if (!priv->sh->devx || (priv->sh->cmng.counter_fallback &&
5305             !priv->sh->aso_age_mng))
5306                 return rte_flow_error_set(error, ENOTSUP,
5307                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5308                                           NULL,
5309                                           "age action not supported");
5310         if (!(action->conf))
5311                 return rte_flow_error_set(error, EINVAL,
5312                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5313                                           "configuration cannot be null");
5314         if (!(age->timeout))
5315                 return rte_flow_error_set(error, EINVAL,
5316                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5317                                           "invalid timeout value 0");
5318         if (action_flags & MLX5_FLOW_ACTION_AGE)
5319                 return rte_flow_error_set(error, EINVAL,
5320                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5321                                           "duplicate age actions set");
5322         return 0;
5323 }
5324
5325 /**
5326  * Validate the modify-header IPv4 DSCP actions.
5327  *
5328  * @param[in] action_flags
5329  *   Holds the actions detected until now.
5330  * @param[in] action
5331  *   Pointer to the modify action.
5332  * @param[in] item_flags
5333  *   Holds the items detected.
5334  * @param[out] error
5335  *   Pointer to error structure.
5336  *
5337  * @return
5338  *   0 on success, a negative errno value otherwise and rte_errno is set.
5339  */
5340 static int
5341 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5342                                          const struct rte_flow_action *action,
5343                                          const uint64_t item_flags,
5344                                          struct rte_flow_error *error)
5345 {
5346         int ret = 0;
5347
5348         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5349         if (!ret) {
5350                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5351                         return rte_flow_error_set(error, EINVAL,
5352                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5353                                                   NULL,
5354                                                   "no ipv4 item in pattern");
5355         }
5356         return ret;
5357 }
5358
5359 /**
5360  * Validate the modify-header IPv6 DSCP actions.
5361  *
5362  * @param[in] action_flags
5363  *   Holds the actions detected until now.
5364  * @param[in] action
5365  *   Pointer to the modify action.
5366  * @param[in] item_flags
5367  *   Holds the items detected.
5368  * @param[out] error
5369  *   Pointer to error structure.
5370  *
5371  * @return
5372  *   0 on success, a negative errno value otherwise and rte_errno is set.
5373  */
5374 static int
5375 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5376                                          const struct rte_flow_action *action,
5377                                          const uint64_t item_flags,
5378                                          struct rte_flow_error *error)
5379 {
5380         int ret = 0;
5381
5382         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5383         if (!ret) {
5384                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5385                         return rte_flow_error_set(error, EINVAL,
5386                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5387                                                   NULL,
5388                                                   "no ipv6 item in pattern");
5389         }
5390         return ret;
5391 }
5392
/*
 * Match callback for the modify-header resource cache.
 *
 * Compares the lookup reference against a cached entry. The comparison
 * key is the byte range of the resource struct starting at ->ft_type up
 * to the end of the fixed part, followed by the variable-length array of
 * modification commands (assumes no uncleared padding inside that range
 * - both sides are zero-allocated from the same ipool).
 *
 * Returns 0 on match, non-zero otherwise (mlx5_list convention).
 */
int
flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
			struct mlx5_list_entry *entry, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
	struct mlx5_flow_dv_modify_hdr_resource *resource =
				  container_of(entry, typeof(*resource), entry);
	/* Fixed part of the key: from ->ft_type to the end of the struct. */
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);

	/* Extend the key with the trailing modification commands. */
	key_len += ref->actions_num * sizeof(ref->actions[0]);
	return ref->actions_num != resource->actions_num ||
	       memcmp(&ref->ft_type, &resource->ft_type, key_len);
}
5407
/*
 * Get (lazily creating) the indexed pool for modify-header resources
 * holding (index + 1) modification commands.
 *
 * Lock-free double-checked creation: if the slot is empty, a new ipool
 * is built and published with a compare-and-swap; on CAS failure another
 * thread won the race, so the local ipool is destroyed and the winner's
 * pool is reloaded and returned.
 */
static struct mlx5_indexed_pool *
flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
{
	struct mlx5_indexed_pool *ipool = __atomic_load_n
				     (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);

	if (!ipool) {
		struct mlx5_indexed_pool *expected = NULL;
		struct mlx5_indexed_pool_config cfg =
		    (struct mlx5_indexed_pool_config) {
		       /* Room for the fixed part plus (index + 1) commands. */
		       .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
								   (index + 1) *
					   sizeof(struct mlx5_modification_cmd),
		       .trunk_size = 64,
		       .grow_trunk = 3,
		       .grow_shift = 2,
		       .need_lock = 1,
		       .release_mem_en = !!sh->reclaim_mode,
		       .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
		       .malloc = mlx5_malloc,
		       .free = mlx5_free,
		       .type = "mlx5_modify_action_resource",
		};

		/* Round the entry size up to pointer alignment. */
		cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
		ipool = mlx5_ipool_create(&cfg);
		if (!ipool)
			return NULL;
		if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
						 &expected, ipool, false,
						 __ATOMIC_SEQ_CST,
						 __ATOMIC_SEQ_CST)) {
			/* Lost the race: use the pool published by the winner. */
			mlx5_ipool_destroy(ipool);
			ipool = __atomic_load_n(&sh->mdh_ipools[index],
						__ATOMIC_SEQ_CST);
		}
	}
	return ipool;
}
5447
5448 struct mlx5_list_entry *
5449 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5450 {
5451         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5452         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5453         struct mlx5dv_dr_domain *ns;
5454         struct mlx5_flow_dv_modify_hdr_resource *entry;
5455         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5456         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5457                                                           ref->actions_num - 1);
5458         int ret;
5459         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5460         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5461         uint32_t idx;
5462
5463         if (unlikely(!ipool)) {
5464                 rte_flow_error_set(ctx->error, ENOMEM,
5465                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5466                                    NULL, "cannot allocate modify ipool");
5467                 return NULL;
5468         }
5469         entry = mlx5_ipool_zmalloc(ipool, &idx);
5470         if (!entry) {
5471                 rte_flow_error_set(ctx->error, ENOMEM,
5472                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5473                                    "cannot allocate resource memory");
5474                 return NULL;
5475         }
5476         rte_memcpy(&entry->ft_type,
5477                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5478                    key_len + data_len);
5479         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5480                 ns = sh->fdb_domain;
5481         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5482                 ns = sh->tx_domain;
5483         else
5484                 ns = sh->rx_domain;
5485         ret = mlx5_flow_os_create_flow_action_modify_header
5486                                         (sh->cdev->ctx, ns, entry,
5487                                          data_len, &entry->action);
5488         if (ret) {
5489                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5490                 rte_flow_error_set(ctx->error, ENOMEM,
5491                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5492                                    NULL, "cannot create modification action");
5493                 return NULL;
5494         }
5495         entry->idx = idx;
5496         return &entry->entry;
5497 }
5498
5499 struct mlx5_list_entry *
5500 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5501                         void *cb_ctx)
5502 {
5503         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5504         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5505         struct mlx5_flow_dv_modify_hdr_resource *entry;
5506         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5507         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5508         uint32_t idx;
5509
5510         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5511                                   &idx);
5512         if (!entry) {
5513                 rte_flow_error_set(ctx->error, ENOMEM,
5514                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5515                                    "cannot allocate resource memory");
5516                 return NULL;
5517         }
5518         memcpy(entry, oentry, sizeof(*entry) + data_len);
5519         entry->idx = idx;
5520         return &entry->entry;
5521 }
5522
5523 void
5524 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5525 {
5526         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5527         struct mlx5_flow_dv_modify_hdr_resource *res =
5528                 container_of(entry, typeof(*res), entry);
5529
5530         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5531 }
5532
5533 /**
5534  * Validate the sample action.
5535  *
5536  * @param[in, out] action_flags
5537  *   Holds the actions detected until now.
5538  * @param[in] action
5539  *   Pointer to the sample action.
5540  * @param[in] dev
5541  *   Pointer to the Ethernet device structure.
5542  * @param[in] attr
5543  *   Attributes of flow that includes this action.
5544  * @param[in] item_flags
5545  *   Holds the items detected.
5546  * @param[in] rss
5547  *   Pointer to the RSS action.
5548  * @param[out] sample_rss
5549  *   Pointer to the RSS action in sample action list.
5550  * @param[out] count
5551  *   Pointer to the COUNT action in sample action list.
5552  * @param[out] fdb_mirror_limit
5553  *   Pointer to the FDB mirror limitation flag.
5554  * @param[out] error
5555  *   Pointer to error structure.
5556  *
5557  * @return
5558  *   0 on success, a negative errno value otherwise and rte_errno is set.
5559  */
5560 static int
5561 flow_dv_validate_action_sample(uint64_t *action_flags,
5562                                const struct rte_flow_action *action,
5563                                struct rte_eth_dev *dev,
5564                                const struct rte_flow_attr *attr,
5565                                uint64_t item_flags,
5566                                const struct rte_flow_action_rss *rss,
5567                                const struct rte_flow_action_rss **sample_rss,
5568                                const struct rte_flow_action_count **count,
5569                                int *fdb_mirror_limit,
5570                                struct rte_flow_error *error)
5571 {
5572         struct mlx5_priv *priv = dev->data->dev_private;
5573         struct mlx5_dev_config *dev_conf = &priv->config;
5574         const struct rte_flow_action_sample *sample = action->conf;
5575         const struct rte_flow_action *act;
5576         uint64_t sub_action_flags = 0;
5577         uint16_t queue_index = 0xFFFF;
5578         int actions_n = 0;
5579         int ret;
5580
5581         if (!sample)
5582                 return rte_flow_error_set(error, EINVAL,
5583                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5584                                           "configuration cannot be NULL");
5585         if (sample->ratio == 0)
5586                 return rte_flow_error_set(error, EINVAL,
5587                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5588                                           "ratio value starts from 1");
5589         if (!priv->sh->devx || (sample->ratio > 0 && !priv->sampler_en))
5590                 return rte_flow_error_set(error, ENOTSUP,
5591                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5592                                           NULL,
5593                                           "sample action not supported");
5594         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5595                 return rte_flow_error_set(error, EINVAL,
5596                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5597                                           "Multiple sample actions not "
5598                                           "supported");
5599         if (*action_flags & MLX5_FLOW_ACTION_METER)
5600                 return rte_flow_error_set(error, EINVAL,
5601                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5602                                           "wrong action order, meter should "
5603                                           "be after sample action");
5604         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5605                 return rte_flow_error_set(error, EINVAL,
5606                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5607                                           "wrong action order, jump should "
5608                                           "be after sample action");
5609         if (*action_flags & MLX5_FLOW_ACTION_CT)
5610                 return rte_flow_error_set(error, EINVAL,
5611                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5612                                           "Sample after CT not supported");
5613         act = sample->actions;
5614         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5615                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5616                         return rte_flow_error_set(error, ENOTSUP,
5617                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5618                                                   act, "too many actions");
5619                 switch (act->type) {
5620                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5621                         ret = mlx5_flow_validate_action_queue(act,
5622                                                               sub_action_flags,
5623                                                               dev,
5624                                                               attr, error);
5625                         if (ret < 0)
5626                                 return ret;
5627                         queue_index = ((const struct rte_flow_action_queue *)
5628                                                         (act->conf))->index;
5629                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5630                         ++actions_n;
5631                         break;
5632                 case RTE_FLOW_ACTION_TYPE_RSS:
5633                         *sample_rss = act->conf;
5634                         ret = mlx5_flow_validate_action_rss(act,
5635                                                             sub_action_flags,
5636                                                             dev, attr,
5637                                                             item_flags,
5638                                                             error);
5639                         if (ret < 0)
5640                                 return ret;
5641                         if (rss && *sample_rss &&
5642                             ((*sample_rss)->level != rss->level ||
5643                             (*sample_rss)->types != rss->types))
5644                                 return rte_flow_error_set(error, ENOTSUP,
5645                                         RTE_FLOW_ERROR_TYPE_ACTION,
5646                                         NULL,
5647                                         "Can't use the different RSS types "
5648                                         "or level in the same flow");
5649                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5650                                 queue_index = (*sample_rss)->queue[0];
5651                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5652                         ++actions_n;
5653                         break;
5654                 case RTE_FLOW_ACTION_TYPE_MARK:
5655                         ret = flow_dv_validate_action_mark(dev, act,
5656                                                            sub_action_flags,
5657                                                            attr, error);
5658                         if (ret < 0)
5659                                 return ret;
5660                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5661                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5662                                                 MLX5_FLOW_ACTION_MARK_EXT;
5663                         else
5664                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5665                         ++actions_n;
5666                         break;
5667                 case RTE_FLOW_ACTION_TYPE_COUNT:
5668                         ret = flow_dv_validate_action_count
5669                                 (dev, false, *action_flags | sub_action_flags,
5670                                  error);
5671                         if (ret < 0)
5672                                 return ret;
5673                         *count = act->conf;
5674                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5675                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5676                         ++actions_n;
5677                         break;
5678                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5679                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5680                         ret = flow_dv_validate_action_port_id(dev,
5681                                                               sub_action_flags,
5682                                                               act,
5683                                                               attr,
5684                                                               error);
5685                         if (ret)
5686                                 return ret;
5687                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5688                         ++actions_n;
5689                         break;
5690                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5691                         ret = flow_dv_validate_action_raw_encap_decap
5692                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5693                                  &actions_n, action, item_flags, error);
5694                         if (ret < 0)
5695                                 return ret;
5696                         ++actions_n;
5697                         break;
5698                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5699                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5700                         ret = flow_dv_validate_action_l2_encap(dev,
5701                                                                sub_action_flags,
5702                                                                act, attr,
5703                                                                error);
5704                         if (ret < 0)
5705                                 return ret;
5706                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5707                         ++actions_n;
5708                         break;
5709                 default:
5710                         return rte_flow_error_set(error, ENOTSUP,
5711                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5712                                                   NULL,
5713                                                   "Doesn't support optional "
5714                                                   "action");
5715                 }
5716         }
5717         if (attr->ingress && !attr->transfer) {
5718                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5719                                           MLX5_FLOW_ACTION_RSS)))
5720                         return rte_flow_error_set(error, EINVAL,
5721                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5722                                                   NULL,
5723                                                   "Ingress must has a dest "
5724                                                   "QUEUE for Sample");
5725         } else if (attr->egress && !attr->transfer) {
5726                 return rte_flow_error_set(error, ENOTSUP,
5727                                           RTE_FLOW_ERROR_TYPE_ACTION,
5728                                           NULL,
5729                                           "Sample Only support Ingress "
5730                                           "or E-Switch");
5731         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5732                 MLX5_ASSERT(attr->transfer);
5733                 if (sample->ratio > 1)
5734                         return rte_flow_error_set(error, ENOTSUP,
5735                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5736                                                   NULL,
5737                                                   "E-Switch doesn't support "
5738                                                   "any optional action "
5739                                                   "for sampling");
5740                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5741                         return rte_flow_error_set(error, ENOTSUP,
5742                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5743                                                   NULL,
5744                                                   "unsupported action QUEUE");
5745                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5746                         return rte_flow_error_set(error, ENOTSUP,
5747                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5748                                                   NULL,
5749                                                   "unsupported action QUEUE");
5750                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5751                         return rte_flow_error_set(error, EINVAL,
5752                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5753                                                   NULL,
5754                                                   "E-Switch must has a dest "
5755                                                   "port for mirroring");
5756                 if (!priv->config.hca_attr.reg_c_preserve &&
5757                      priv->representor_id != UINT16_MAX)
5758                         *fdb_mirror_limit = 1;
5759         }
5760         /* Continue validation for Xcap actions.*/
5761         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5762             (queue_index == 0xFFFF ||
5763              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5764                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5765                      MLX5_FLOW_XCAP_ACTIONS)
5766                         return rte_flow_error_set(error, ENOTSUP,
5767                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5768                                                   NULL, "encap and decap "
5769                                                   "combination aren't "
5770                                                   "supported");
5771                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5772                                                         MLX5_FLOW_ACTION_ENCAP))
5773                         return rte_flow_error_set(error, ENOTSUP,
5774                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5775                                                   NULL, "encap is not supported"
5776                                                   " for ingress traffic");
5777         }
5778         return 0;
5779 }
5780
5781 /**
5782  * Find existing modify-header resource or create and register a new one.
5783  *
5784  * @param dev[in, out]
5785  *   Pointer to rte_eth_dev structure.
5786  * @param[in, out] resource
5787  *   Pointer to modify-header resource.
5788  * @parm[in, out] dev_flow
5789  *   Pointer to the dev_flow.
5790  * @param[out] error
5791  *   pointer to error structure.
5792  *
5793  * @return
5794  *   0 on success otherwise -errno and errno is set.
5795  */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	/* Key spans from ft_type to the end of the action array. */
	uint32_t key_len = sizeof(*resource) -
			   offsetof(typeof(*resource), ft_type) +
			   resource->actions_num * sizeof(resource->actions[0]);
	struct mlx5_list_entry *entry;
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = resource,
	};
	struct mlx5_hlist *modify_cmds;
	uint64_t key64;

	/* Lazily create the shared hash list of modify-header resources. */
	modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
				"hdr_modify",
				MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
				true, false, sh,
				flow_dv_modify_create_cb,
				flow_dv_modify_match_cb,
				flow_dv_modify_remove_cb,
				flow_dv_modify_clone_cb,
				flow_dv_modify_clone_free_cb);
	if (unlikely(!modify_cmds))
		return -rte_errno;
	/* Group 0 flows are inserted at root table level. */
	resource->root = !dev_flow->dv.group;
	if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
								resource->root))
		return rte_flow_error_set(error, EOVERFLOW,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many modify header items");
	/* Hash the key, then look up or create the shared resource. */
	key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
	entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
	if (!entry)
		return -rte_errno;
	/* Point the flow handle at the (possibly pre-existing) resource. */
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->handle->dvh.modify_hdr = resource;
	return 0;
}
5841
5842 /**
5843  * Get DV flow counter by index.
5844  *
5845  * @param[in] dev
5846  *   Pointer to the Ethernet device structure.
5847  * @param[in] idx
5848  *   mlx5 flow counter index in the container.
5849  * @param[out] ppool
5850  *   mlx5 flow counter pool in the container.
5851  *
5852  * @return
5853  *   Pointer to the counter, NULL otherwise.
5854  */
5855 static struct mlx5_flow_counter *
5856 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5857                            uint32_t idx,
5858                            struct mlx5_flow_counter_pool **ppool)
5859 {
5860         struct mlx5_priv *priv = dev->data->dev_private;
5861         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5862         struct mlx5_flow_counter_pool *pool;
5863
5864         /* Decrease to original index and clear shared bit. */
5865         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5866         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5867         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5868         MLX5_ASSERT(pool);
5869         if (ppool)
5870                 *ppool = pool;
5871         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5872 }
5873
5874 /**
5875  * Check the devx counter belongs to the pool.
5876  *
5877  * @param[in] pool
5878  *   Pointer to the counter pool.
5879  * @param[in] id
5880  *   The counter devx ID.
5881  *
5882  * @return
5883  *   True if counter belongs to the pool, false otherwise.
5884  */
5885 static bool
5886 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5887 {
5888         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5889                    MLX5_COUNTERS_PER_POOL;
5890
5891         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5892                 return true;
5893         return false;
5894 }
5895
5896 /**
5897  * Get a pool by devx counter ID.
5898  *
5899  * @param[in] cmng
5900  *   Pointer to the counter management.
5901  * @param[in] id
5902  *   The counter devx ID.
5903  *
5904  * @return
5905  *   The counter pool pointer if exists, NULL otherwise,
5906  */
5907 static struct mlx5_flow_counter_pool *
5908 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5909 {
5910         uint32_t i;
5911         struct mlx5_flow_counter_pool *pool = NULL;
5912
5913         rte_spinlock_lock(&cmng->pool_update_sl);
5914         /* Check last used pool. */
5915         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5916             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5917                 pool = cmng->pools[cmng->last_pool_idx];
5918                 goto out;
5919         }
5920         /* ID out of range means no suitable pool in the container. */
5921         if (id > cmng->max_id || id < cmng->min_id)
5922                 goto out;
5923         /*
5924          * Find the pool from the end of the container, since mostly counter
5925          * ID is sequence increasing, and the last pool should be the needed
5926          * one.
5927          */
5928         i = cmng->n_valid;
5929         while (i--) {
5930                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5931
5932                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5933                         pool = pool_tmp;
5934                         break;
5935                 }
5936         }
5937 out:
5938         rte_spinlock_unlock(&cmng->pool_update_sl);
5939         return pool;
5940 }
5941
5942 /**
5943  * Resize a counter container.
5944  *
5945  * @param[in] dev
5946  *   Pointer to the Ethernet device structure.
5947  *
5948  * @return
5949  *   0 on success, otherwise negative errno value and rte_errno is set.
5950  */
5951 static int
5952 flow_dv_container_resize(struct rte_eth_dev *dev)
5953 {
5954         struct mlx5_priv *priv = dev->data->dev_private;
5955         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5956         void *old_pools = cmng->pools;
5957         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5958         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5959         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5960
5961         if (!pools) {
5962                 rte_errno = ENOMEM;
5963                 return -ENOMEM;
5964         }
5965         if (old_pools)
5966                 memcpy(pools, old_pools, cmng->n *
5967                                        sizeof(struct mlx5_flow_counter_pool *));
5968         cmng->n = resize;
5969         cmng->pools = pools;
5970         if (old_pools)
5971                 mlx5_free(old_pools);
5972         return 0;
5973 }
5974
5975 /**
5976  * Query a devx flow counter.
5977  *
5978  * @param[in] dev
5979  *   Pointer to the Ethernet device structure.
5980  * @param[in] counter
5981  *   Index to the flow counter.
5982  * @param[out] pkts
5983  *   The statistics value of packets.
5984  * @param[out] bytes
5985  *   The statistics value of bytes.
5986  *
5987  * @return
5988  *   0 on success, otherwise a negative errno value and rte_errno is set.
5989  */
static inline int
_flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
		     uint64_t *bytes)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	int offset;

	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	/* Fallback mode queries the single devx counter object directly. */
	if (priv->sh->cmng.counter_fallback)
		return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
					0, pkts, bytes, 0, NULL, NULL, 0);
	/* Serialize against the background raw-data update. */
	rte_spinlock_lock(&pool->sl);
	if (!pool->raw) {
		/* No raw statistics fetched for this pool yet. */
		*pkts = 0;
		*bytes = 0;
	} else {
		/* Read the big-endian hits/bytes from the raw data array. */
		offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
	}
	rte_spinlock_unlock(&pool->sl);
	return 0;
}
6016
6017 /**
6018  * Create and initialize a new counter pool.
6019  *
6020  * @param[in] dev
6021  *   Pointer to the Ethernet device structure.
6022  * @param[out] dcs
6023  *   The devX counter handle.
6024  * @param[in] age
6025  *   Whether the pool is for counter that was allocated for aging.
6026  * @param[in/out] cont_cur
6027  *   Pointer to the container pointer, it will be update in pool resize.
6028  *
6029  * @return
6030  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
6031  */
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t size = sizeof(*pool);

	/* Counter array follows the pool header; age data is optional. */
	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->raw = NULL;
	pool->is_aged = !!age;
	pool->query_gen = 0;
	pool->min_dcs = dcs;
	rte_spinlock_init(&pool->sl);
	rte_spinlock_init(&pool->csl);
	TAILQ_INIT(&pool->counters[0]);
	TAILQ_INIT(&pool->counters[1]);
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	/* Publish the pool into the container under the update lock. */
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool->index = cmng->n_valid;
	/* Grow the container when all slots are already in use. */
	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
		mlx5_free(pool);
		rte_spinlock_unlock(&cmng->pool_update_sl);
		return NULL;
	}
	cmng->pools[pool->index] = pool;
	cmng->n_valid++;
	if (unlikely(fallback)) {
		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);

		/* Track the devx counter ID range covered by all pools. */
		if (base < cmng->min_id)
			cmng->min_id = base;
		if (base > cmng->max_id)
			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
		cmng->last_pool_idx = pool->index;
	}
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
6079
/**
 * Prepare a new counter and/or a new counter pool.
 *
 * In fallback mode a single counter object is allocated from the firmware
 * and placed into the pool covering its ID (creating that pool on demand).
 * Otherwise a bulk of counters is allocated, a new pool is built around it,
 * and all counters except the first are pushed to the per-type global free
 * list; the first counter is handed back to the caller.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] cnt_free
 *   Where to put the pointer of a new counter.
 * @param[in] age
 *   Whether the pool is for counter that was allocated for aging.
 *
 * @return
 *   The counter pool pointer and @p cnt_free is set on success,
 *   NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_counters tmp_tq;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t i;

	if (fallback) {
		/* bulk_bitmap must be 0 for single counter allocation. */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
		if (!dcs)
			return NULL;
		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
		if (!pool) {
			/* No pool covers this counter ID yet - create one. */
			pool = flow_dv_pool_create(dev, dcs, age);
			if (!pool) {
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		}
		/* Hand out the pool slot matching the firmware counter ID. */
		i = dcs->id % MLX5_COUNTERS_PER_POOL;
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		cnt->dcs_when_free = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/* Bulk allocation (bulk_bitmap = 0x4) backing a whole pool. */
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, age);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/*
	 * Build the free list locally first, then splice it into the
	 * global per-type list in one shot under its lock.
	 */
	TAILQ_INIT(&tmp_tq);
	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
	}
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Counter 0 is reserved for the caller, not put on the free list. */
	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
	(*cnt_free)->pool = pool;
	return pool;
}
6153
/**
 * Allocate a flow counter.
 *
 * Takes a counter from the per-type global free list, growing the
 * container by a fresh pool when the list is empty. The DV count action
 * is created lazily on the counter's first use, and the current hardware
 * hits/bytes are recorded as the reset baseline.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] age
 *   Whether the counter was allocated for aging.
 *
 * @return
 *   Index to flow counter on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	bool fallback = priv->sh->cmng.counter_fallback;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	uint32_t cnt_idx;

	/* Counters are DevX objects - nothing to do without DevX. */
	if (!priv->sh->devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get free counters from container. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
	if (cnt_free)
		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Free list empty - grow by a new pool, which also yields a counter. */
	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
		goto err;
	pool = cnt_free->pool;
	if (fallback)
		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only in the first time usage. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;
		int ret;

		if (!fallback) {
			/* Bulk mode: offset into the pool's shared object. */
			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
			dcs = pool->min_dcs;
		} else {
			/* Fallback: each counter owns its DevX object. */
			offset = 0;
			dcs = cnt_free->dcs_when_free;
		}
		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
							    &cnt_free->action);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
	/* Update the counter reset values. */
	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
				 &cnt_free->bytes))
		goto err;
	if (!fallback && !priv->sh->cmng.query_thread_on)
		/* Start the asynchronous batch query by the host thread. */
		mlx5_set_query_alarm(priv->sh);
	/*
	 * When the count action isn't shared (by ID), shared_info field is
	 * used for indirect action API's refcnt.
	 * When the counter action is not shared neither by ID nor by indirect
	 * action API, shared info must be 1.
	 */
	cnt_free->shared_info.refcnt = 1;
	return cnt_idx;
err:
	/* Return the counter (if any was taken) to the per-type free list. */
	if (cnt_free) {
		cnt_free->pool = pool;
		if (fallback)
			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
		rte_spinlock_lock(&cmng->csl[cnt_type]);
		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
		rte_spinlock_unlock(&cmng->csl[cnt_type]);
	}
	return 0;
}
6240
6241 /**
6242  * Get age param from counter index.
6243  *
6244  * @param[in] dev
6245  *   Pointer to the Ethernet device structure.
6246  * @param[in] counter
6247  *   Index to the counter handler.
6248  *
6249  * @return
6250  *   The aging parameter specified for the counter index.
6251  */
6252 static struct mlx5_age_param*
6253 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6254                                 uint32_t counter)
6255 {
6256         struct mlx5_flow_counter *cnt;
6257         struct mlx5_flow_counter_pool *pool = NULL;
6258
6259         flow_dv_counter_get_by_idx(dev, counter, &pool);
6260         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6261         cnt = MLX5_POOL_GET_CNT(pool, counter);
6262         return MLX5_CNT_TO_AGE(cnt);
6263 }
6264
/**
 * Remove a flow counter from aged counter list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 * @param[in] cnt
 *   Pointer to the counter handler.
 */
static void
flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
				uint32_t counter, struct mlx5_flow_counter *cnt)
{
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t expected = AGE_CANDIDATE;

	age_info = GET_PORT_AGE_INFO(priv);
	age_param = flow_dv_counter_idx_get_age(dev, counter);
	/*
	 * Fast path: atomically flip AGE_CANDIDATE -> AGE_FREE. If the state
	 * is no longer AGE_CANDIDATE, the counter was moved to the aged list
	 * and must be unlinked from it under the lock below.
	 */
	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
					 AGE_FREE, false, __ATOMIC_RELAXED,
					 __ATOMIC_RELAXED)) {
		/**
		 * We need the lock even it is age timeout,
		 * since counter may still in process.
		 */
		rte_spinlock_lock(&age_info->aged_sl);
		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
		rte_spinlock_unlock(&age_info->aged_sl);
		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
	}
}
6299
/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type;

	/* Index 0 means "no counter". */
	if (!counter)
		return;
	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	if (pool->is_aged) {
		/* Detach the counter from the aging machinery first. */
		flow_dv_counter_remove_from_age(dev, counter, cnt);
	} else {
		/*
		 * If the counter action is shared by indirect action API,
		 * the atomic function reduces its references counter.
		 * If after the reduction the action is still referenced, the
		 * function returns here and does not release it.
		 * When the counter action is not shared by
		 * indirect action API, shared info is 1 before the reduction,
		 * so this condition is failed and function doesn't return here.
		 */
		if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
				       __ATOMIC_RELAXED))
			return;
	}
	cnt->pool = pool;
	/*
	 * Put the counter back to list to be updated in none fallback mode.
	 * Currently, we are using two list alternately, while one is in query,
	 * add the freed counter to the other list based on the pool query_gen
	 * value. After query finishes, add counter the list to the global
	 * container counter list. The list changes while query starts. In
	 * this case, lock will not be needed as query callback and release
	 * function both operate with the different list.
	 */
	if (!priv->sh->cmng.counter_fallback) {
		rte_spinlock_lock(&pool->csl);
		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
		rte_spinlock_unlock(&pool->csl);
	} else {
		/* Fallback: return straight to the global per-type list. */
		cnt->dcs_when_free = cnt->dcs_when_active;
		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
					   MLX5_COUNTER_TYPE_ORIGIN;
		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
				  cnt, next);
		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
	}
}
6360
6361 /**
6362  * Resize a meter id container.
6363  *
6364  * @param[in] dev
6365  *   Pointer to the Ethernet device structure.
6366  *
6367  * @return
6368  *   0 on success, otherwise negative errno value and rte_errno is set.
6369  */
6370 static int
6371 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6372 {
6373         struct mlx5_priv *priv = dev->data->dev_private;
6374         struct mlx5_aso_mtr_pools_mng *pools_mng =
6375                                 &priv->sh->mtrmng->pools_mng;
6376         void *old_pools = pools_mng->pools;
6377         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6378         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6379         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6380
6381         if (!pools) {
6382                 rte_errno = ENOMEM;
6383                 return -ENOMEM;
6384         }
6385         if (!pools_mng->n)
6386                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6387                         mlx5_free(pools);
6388                         return -ENOMEM;
6389                 }
6390         if (old_pools)
6391                 memcpy(pools, old_pools, pools_mng->n *
6392                                        sizeof(struct mlx5_aso_mtr_pool *));
6393         pools_mng->n = resize;
6394         pools_mng->pools = pools;
6395         if (old_pools)
6396                 mlx5_free(old_pools);
6397         return 0;
6398 }
6399
/**
 * Prepare a new meter and/or a new meter pool.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] mtr_free
 *   Where to put the pointer of a new meter.
 *
 * @return
 *   The meter pool pointer and @mtr_free is set on success,
 *   NULL otherwise and rte_errno is set.
 */
static struct mlx5_aso_mtr_pool *
flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
	struct mlx5_aso_mtr_pool *pool = NULL;
	struct mlx5_devx_obj *dcs = NULL;
	uint32_t i;
	uint32_t log_obj_size;

	/*
	 * The ASO object is sized to half the pool; presumably each ASO
	 * object covers two meters - confirm against the PRM.
	 */
	log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
	dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
						      priv->sh->cdev->pdn,
						      log_obj_size);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		claim_zero(mlx5_devx_cmd_destroy(dcs));
		return NULL;
	}
	pool->devx_obj = dcs;
	/* Publishing the pool races with container resize - write lock. */
	rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
	pool->index = pools_mng->n_valid;
	/* Container full - grow it before storing the new pool. */
	if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
		mlx5_free(pool);
		claim_zero(mlx5_devx_cmd_destroy(dcs));
		rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
		return NULL;
	}
	pools_mng->pools[pool->index] = pool;
	pools_mng->n_valid++;
	rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
	/* Meters 1..N-1 go to the free list; meter 0 goes to the caller. */
	for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
		pool->mtrs[i].offset = i;
		LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
	}
	pool->mtrs[0].offset = 0;
	*mtr_free = &pool->mtrs[0];
	return pool;
}
6456
6457 /**
6458  * Release a flow meter into pool.
6459  *
6460  * @param[in] dev
6461  *   Pointer to the Ethernet device structure.
6462  * @param[in] mtr_idx
6463  *   Index to aso flow meter.
6464  */
6465 static void
6466 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6467 {
6468         struct mlx5_priv *priv = dev->data->dev_private;
6469         struct mlx5_aso_mtr_pools_mng *pools_mng =
6470                                 &priv->sh->mtrmng->pools_mng;
6471         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6472
6473         MLX5_ASSERT(aso_mtr);
6474         rte_spinlock_lock(&pools_mng->mtrsl);
6475         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6476         aso_mtr->state = ASO_METER_FREE;
6477         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6478         rte_spinlock_unlock(&pools_mng->mtrsl);
6479 }
6480
/**
 * Allocate an aso flow meter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Index to aso flow meter on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_mtr_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr *mtr_free = NULL;
	struct mlx5_aso_mtr_pools_mng *pools_mng =
				&priv->sh->mtrmng->pools_mng;
	struct mlx5_aso_mtr_pool *pool;
	uint32_t mtr_idx = 0;

	/* ASO meters are DevX objects - nothing to do without DevX. */
	if (!priv->sh->devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Allocate the flow meter memory. */
	/* Get free meters from management. */
	rte_spinlock_lock(&pools_mng->mtrsl);
	mtr_free = LIST_FIRST(&pools_mng->meters);
	if (mtr_free)
		LIST_REMOVE(mtr_free, next);
	/* Free list empty - create a new pool (still under the lock). */
	if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
		rte_spinlock_unlock(&pools_mng->mtrsl);
		return 0;
	}
	mtr_free->state = ASO_METER_WAIT;
	rte_spinlock_unlock(&pools_mng->mtrsl);
	/* Recover the owning pool from the meter's offset within it. */
	pool = container_of(mtr_free,
			struct mlx5_aso_mtr_pool,
			mtrs[mtr_free->offset]);
	mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
	/* Create the DR ASO action lazily, on this meter's first use. */
	if (!mtr_free->fm.meter_action) {
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
		struct rte_flow_error error;
		uint8_t reg_id;

		reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
		mtr_free->fm.meter_action =
			mlx5_glue->dv_create_flow_action_aso
						(priv->sh->rx_domain,
						 pool->devx_obj->obj,
						 mtr_free->offset,
						 (1 << MLX5_FLOW_COLOR_GREEN),
						 reg_id - REG_C_0);
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
		/*
		 * Without HAVE_MLX5_DR_CREATE_ACTION_ASO (or on glue
		 * failure) the action stays NULL - return the meter to
		 * the pool and fail the allocation.
		 */
		if (!mtr_free->fm.meter_action) {
			flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
			return 0;
		}
	}
	return mtr_idx;
}
6541
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel the flow belongs to (may be NULL).
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[in] grp_info
 *   Group translation info used to map the group attribute to a table.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 0 on success and non root table.
 *   - 1 on success and root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct mlx5_flow_tunnel *tunnel,
			    const struct rte_flow_attr *attributes,
			    const struct flow_grp_info *grp_info,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
	int ret = 0;

#ifndef HAVE_MLX5DV_DR
	RTE_SET_USED(tunnel);
	RTE_SET_USED(grp_info);
	/* Without DR support only group 0 (the root table) is usable. */
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#else
	uint32_t table = 0;

	ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
				       grp_info, error);
	if (ret)
		return ret;
	/* Table 0 is the root table - report that via the return value. */
	if (!table)
		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
#endif
	/* MLX5_FLOW_LOWEST_PRIO_INDICATOR is resolved later, skip the check. */
	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
	    attributes->priority > lowest_priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer) {
		if (!priv->config.dv_esw_en)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (!(priv->representor || priv->master))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "E-Switch configuration can only be"
				 " done by a master or a representor device");
		if (attributes->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
	}
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return ret;
}
6619
6620 static int
6621 validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6622                         int64_t pattern_flags, uint64_t l3_flags,
6623                         uint64_t l4_flags, uint64_t ip4_flag,
6624                         struct rte_flow_error *error)
6625 {
6626         if (mask->l3_ok && !(pattern_flags & l3_flags))
6627                 return rte_flow_error_set(error, EINVAL,
6628                                           RTE_FLOW_ERROR_TYPE_ITEM,
6629                                           NULL, "missing L3 protocol");
6630
6631         if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6632                 return rte_flow_error_set(error, EINVAL,
6633                                           RTE_FLOW_ERROR_TYPE_ITEM,
6634                                           NULL, "missing IPv4 protocol");
6635
6636         if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6637                 return rte_flow_error_set(error, EINVAL,
6638                                           RTE_FLOW_ERROR_TYPE_ITEM,
6639                                           NULL, "missing L4 protocol");
6640
6641         return 0;
6642 }
6643
6644 static int
6645 flow_dv_validate_item_integrity_post(const struct
6646                                      rte_flow_item *integrity_items[2],
6647                                      int64_t pattern_flags,
6648                                      struct rte_flow_error *error)
6649 {
6650         const struct rte_flow_item_integrity *mask;
6651         int ret;
6652
6653         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6654                 mask = (typeof(mask))integrity_items[0]->mask;
6655                 ret = validate_integrity_bits(mask, pattern_flags,
6656                                               MLX5_FLOW_LAYER_OUTER_L3,
6657                                               MLX5_FLOW_LAYER_OUTER_L4,
6658                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6659                                               error);
6660                 if (ret)
6661                         return ret;
6662         }
6663         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6664                 mask = (typeof(mask))integrity_items[1]->mask;
6665                 ret = validate_integrity_bits(mask, pattern_flags,
6666                                               MLX5_FLOW_LAYER_INNER_L3,
6667                                               MLX5_FLOW_LAYER_INNER_L4,
6668                                               MLX5_FLOW_LAYER_INNER_L3_IPV4,
6669                                               error);
6670                 if (ret)
6671                         return ret;
6672         }
6673         return 0;
6674 }
6675
6676 static int
6677 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6678                                 const struct rte_flow_item *integrity_item,
6679                                 uint64_t pattern_flags, uint64_t *last_item,
6680                                 const struct rte_flow_item *integrity_items[2],
6681                                 struct rte_flow_error *error)
6682 {
6683         struct mlx5_priv *priv = dev->data->dev_private;
6684         const struct rte_flow_item_integrity *mask = (typeof(mask))
6685                                                      integrity_item->mask;
6686         const struct rte_flow_item_integrity *spec = (typeof(spec))
6687                                                      integrity_item->spec;
6688
6689         if (!priv->config.hca_attr.pkt_integrity_match)
6690                 return rte_flow_error_set(error, ENOTSUP,
6691                                           RTE_FLOW_ERROR_TYPE_ITEM,
6692                                           integrity_item,
6693                                           "packet integrity integrity_item not supported");
6694         if (!spec)
6695                 return rte_flow_error_set(error, ENOTSUP,
6696                                           RTE_FLOW_ERROR_TYPE_ITEM,
6697                                           integrity_item,
6698                                           "no spec for integrity item");
6699         if (!mask)
6700                 mask = &rte_flow_item_integrity_mask;
6701         if (!mlx5_validate_integrity_item(mask))
6702                 return rte_flow_error_set(error, ENOTSUP,
6703                                           RTE_FLOW_ERROR_TYPE_ITEM,
6704                                           integrity_item,
6705                                           "unsupported integrity filter");
6706         if (spec->level > 1) {
6707                 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
6708                         return rte_flow_error_set
6709                                 (error, ENOTSUP,
6710                                  RTE_FLOW_ERROR_TYPE_ITEM,
6711                                  NULL, "multiple inner integrity items not supported");
6712                 integrity_items[1] = integrity_item;
6713                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
6714         } else {
6715                 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
6716                         return rte_flow_error_set
6717                                 (error, ENOTSUP,
6718                                  RTE_FLOW_ERROR_TYPE_ITEM,
6719                                  NULL, "multiple outer integrity items not supported");
6720                 integrity_items[0] = integrity_item;
6721                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
6722         }
6723         return 0;
6724 }
6725
6726 static int
6727 flow_dv_validate_item_flex(struct rte_eth_dev *dev,
6728                            const struct rte_flow_item *item,
6729                            uint64_t item_flags,
6730                            uint64_t *last_item,
6731                            bool is_inner,
6732                            struct rte_flow_error *error)
6733 {
6734         const struct rte_flow_item_flex *flow_spec = item->spec;
6735         const struct rte_flow_item_flex *flow_mask = item->mask;
6736         struct mlx5_flex_item *flex;
6737
6738         if (!flow_spec)
6739                 return rte_flow_error_set(error, EINVAL,
6740                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6741                                           "flex flow item spec cannot be NULL");
6742         if (!flow_mask)
6743                 return rte_flow_error_set(error, EINVAL,
6744                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6745                                           "flex flow item mask cannot be NULL");
6746         if (item->last)
6747                 return rte_flow_error_set(error, ENOTSUP,
6748                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6749                                           "flex flow item last not supported");
6750         if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
6751                 return rte_flow_error_set(error, EINVAL,
6752                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6753                                           "invalid flex flow item handle");
6754         flex = (struct mlx5_flex_item *)flow_spec->handle;
6755         switch (flex->tunnel_mode) {
6756         case FLEX_TUNNEL_MODE_SINGLE:
6757                 if (item_flags &
6758                     (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
6759                         rte_flow_error_set(error, EINVAL,
6760                                            RTE_FLOW_ERROR_TYPE_ITEM,
6761                                            NULL, "multiple flex items not supported");
6762                 break;
6763         case FLEX_TUNNEL_MODE_OUTER:
6764                 if (is_inner)
6765                         rte_flow_error_set(error, EINVAL,
6766                                            RTE_FLOW_ERROR_TYPE_ITEM,
6767                                            NULL, "inner flex item was not configured");
6768                 if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
6769                         rte_flow_error_set(error, ENOTSUP,
6770                                            RTE_FLOW_ERROR_TYPE_ITEM,
6771                                            NULL, "multiple flex items not supported");
6772                 break;
6773         case FLEX_TUNNEL_MODE_INNER:
6774                 if (!is_inner)
6775                         rte_flow_error_set(error, EINVAL,
6776                                            RTE_FLOW_ERROR_TYPE_ITEM,
6777                                            NULL, "outer flex item was not configured");
6778                 if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
6779                         rte_flow_error_set(error, EINVAL,
6780                                            RTE_FLOW_ERROR_TYPE_ITEM,
6781                                            NULL, "multiple flex items not supported");
6782                 break;
6783         case FLEX_TUNNEL_MODE_MULTI:
6784                 if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
6785                     (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
6786                         rte_flow_error_set(error, EINVAL,
6787                                            RTE_FLOW_ERROR_TYPE_ITEM,
6788                                            NULL, "multiple flex items not supported");
6789                 }
6790                 break;
6791         case FLEX_TUNNEL_MODE_TUNNEL:
6792                 if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
6793                         rte_flow_error_set(error, EINVAL,
6794                                            RTE_FLOW_ERROR_TYPE_ITEM,
6795                                            NULL, "multiple flex tunnel items not supported");
6796                 break;
6797         default:
6798                 rte_flow_error_set(error, EINVAL,
6799                                    RTE_FLOW_ERROR_TYPE_ITEM,
6800                                    NULL, "invalid flex item configuration");
6801         }
6802         *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
6803                      MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
6804                      MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
6805         return 0;
6806 }
6807
6808 /**
6809  * Internal validation function for validating both actions and items.
6810  *
6811  * @param[in] dev
6812  *   Pointer to the rte_eth_dev structure.
6813  * @param[in] attr
6814  *   Pointer to the flow attributes.
6815  * @param[in] items
6816  *   Pointer to the list of items.
6817  * @param[in] actions
6818  *   Pointer to the list of actions.
6819  * @param[in] external
6820  *   True when this flow rule is created by a request external to the PMD.
6821  * @param[in] hairpin
6822  *   Number of hairpin TX actions, 0 means classic flow.
6823  * @param[out] error
6824  *   Pointer to the error structure.
6825  *
6826  * @return
6827  *   0 on success, a negative errno value otherwise and rte_errno is set.
6828  */
6829 static int
6830 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6831                  const struct rte_flow_item items[],
6832                  const struct rte_flow_action actions[],
6833                  bool external, int hairpin, struct rte_flow_error *error)
6834 {
6835         int ret;
6836         uint64_t action_flags = 0;
6837         uint64_t item_flags = 0;
6838         uint64_t last_item = 0;
6839         uint8_t next_protocol = 0xff;
6840         uint16_t ether_type = 0;
6841         int actions_n = 0;
6842         uint8_t item_ipv6_proto = 0;
6843         int fdb_mirror_limit = 0;
6844         int modify_after_mirror = 0;
6845         const struct rte_flow_item *geneve_item = NULL;
6846         const struct rte_flow_item *gre_item = NULL;
6847         const struct rte_flow_item *gtp_item = NULL;
6848         const struct rte_flow_action_raw_decap *decap;
6849         const struct rte_flow_action_raw_encap *encap;
6850         const struct rte_flow_action_rss *rss = NULL;
6851         const struct rte_flow_action_rss *sample_rss = NULL;
6852         const struct rte_flow_action_count *sample_count = NULL;
6853         const struct rte_flow_item_tcp nic_tcp_mask = {
6854                 .hdr = {
6855                         .tcp_flags = 0xFF,
6856                         .src_port = RTE_BE16(UINT16_MAX),
6857                         .dst_port = RTE_BE16(UINT16_MAX),
6858                 }
6859         };
6860         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6861                 .hdr = {
6862                         .src_addr =
6863                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6864                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6865                         .dst_addr =
6866                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6867                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6868                         .vtc_flow = RTE_BE32(0xffffffff),
6869                         .proto = 0xff,
6870                         .hop_limits = 0xff,
6871                 },
6872                 .has_frag_ext = 1,
6873         };
6874         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6875                 .hdr = {
6876                         .common = {
6877                                 .u32 =
6878                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6879                                         .type = 0xFF,
6880                                         }).u32),
6881                         },
6882                         .dummy[0] = 0xffffffff,
6883                 },
6884         };
6885         struct mlx5_priv *priv = dev->data->dev_private;
6886         struct mlx5_dev_config *dev_conf = &priv->config;
6887         uint16_t queue_index = 0xFFFF;
6888         const struct rte_flow_item_vlan *vlan_m = NULL;
6889         uint32_t rw_act_num = 0;
6890         uint64_t is_root;
6891         const struct mlx5_flow_tunnel *tunnel;
6892         enum mlx5_tof_rule_type tof_rule_type;
6893         struct flow_grp_info grp_info = {
6894                 .external = !!external,
6895                 .transfer = !!attr->transfer,
6896                 .fdb_def_rule = !!priv->fdb_def_rule,
6897                 .std_tbl_fix = true,
6898         };
6899         const struct rte_eth_hairpin_conf *conf;
6900         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6901         const struct rte_flow_item *port_id_item = NULL;
6902         bool def_policy = false;
6903         uint16_t udp_dport = 0;
6904
6905         if (items == NULL)
6906                 return -1;
6907         tunnel = is_tunnel_offload_active(dev) ?
6908                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6909         if (tunnel) {
6910                 if (!priv->config.dv_flow_en)
6911                         return rte_flow_error_set
6912                                 (error, ENOTSUP,
6913                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6914                                  NULL, "tunnel offload requires DV flow interface");
6915                 if (priv->representor)
6916                         return rte_flow_error_set
6917                                 (error, ENOTSUP,
6918                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6919                                  NULL, "decap not supported for VF representor");
6920                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6921                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6922                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6923                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6924                                         MLX5_FLOW_ACTION_DECAP;
6925                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6926                                         (dev, attr, tunnel, tof_rule_type);
6927         }
6928         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6929         if (ret < 0)
6930                 return ret;
6931         is_root = (uint64_t)ret;
6932         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6933                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6934                 int type = items->type;
6935
6936                 if (!mlx5_flow_os_item_supported(type))
6937                         return rte_flow_error_set(error, ENOTSUP,
6938                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6939                                                   NULL, "item not supported");
6940                 switch (type) {
6941                 case RTE_FLOW_ITEM_TYPE_VOID:
6942                         break;
6943                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6944                         ret = flow_dv_validate_item_port_id
6945                                         (dev, items, attr, item_flags, error);
6946                         if (ret < 0)
6947                                 return ret;
6948                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6949                         port_id_item = items;
6950                         break;
6951                 case RTE_FLOW_ITEM_TYPE_ETH:
6952                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6953                                                           true, error);
6954                         if (ret < 0)
6955                                 return ret;
6956                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6957                                              MLX5_FLOW_LAYER_OUTER_L2;
6958                         if (items->mask != NULL && items->spec != NULL) {
6959                                 ether_type =
6960                                         ((const struct rte_flow_item_eth *)
6961                                          items->spec)->type;
6962                                 ether_type &=
6963                                         ((const struct rte_flow_item_eth *)
6964                                          items->mask)->type;
6965                                 ether_type = rte_be_to_cpu_16(ether_type);
6966                         } else {
6967                                 ether_type = 0;
6968                         }
6969                         break;
6970                 case RTE_FLOW_ITEM_TYPE_VLAN:
6971                         ret = flow_dv_validate_item_vlan(items, item_flags,
6972                                                          dev, error);
6973                         if (ret < 0)
6974                                 return ret;
6975                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6976                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6977                         if (items->mask != NULL && items->spec != NULL) {
6978                                 ether_type =
6979                                         ((const struct rte_flow_item_vlan *)
6980                                          items->spec)->inner_type;
6981                                 ether_type &=
6982                                         ((const struct rte_flow_item_vlan *)
6983                                          items->mask)->inner_type;
6984                                 ether_type = rte_be_to_cpu_16(ether_type);
6985                         } else {
6986                                 ether_type = 0;
6987                         }
6988                         /* Store outer VLAN mask for of_push_vlan action. */
6989                         if (!tunnel)
6990                                 vlan_m = items->mask;
6991                         break;
6992                 case RTE_FLOW_ITEM_TYPE_IPV4:
6993                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6994                                                   &item_flags, &tunnel);
6995                         ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
6996                                                          last_item, ether_type,
6997                                                          error);
6998                         if (ret < 0)
6999                                 return ret;
7000                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7001                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7002                         if (items->mask != NULL &&
7003                             ((const struct rte_flow_item_ipv4 *)
7004                              items->mask)->hdr.next_proto_id) {
7005                                 next_protocol =
7006                                         ((const struct rte_flow_item_ipv4 *)
7007                                          (items->spec))->hdr.next_proto_id;
7008                                 next_protocol &=
7009                                         ((const struct rte_flow_item_ipv4 *)
7010                                          (items->mask))->hdr.next_proto_id;
7011                         } else {
7012                                 /* Reset for inner layer. */
7013                                 next_protocol = 0xff;
7014                         }
7015                         break;
7016                 case RTE_FLOW_ITEM_TYPE_IPV6:
7017                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7018                                                   &item_flags, &tunnel);
7019                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
7020                                                            last_item,
7021                                                            ether_type,
7022                                                            &nic_ipv6_mask,
7023                                                            error);
7024                         if (ret < 0)
7025                                 return ret;
7026                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7027                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7028                         if (items->mask != NULL &&
7029                             ((const struct rte_flow_item_ipv6 *)
7030                              items->mask)->hdr.proto) {
7031                                 item_ipv6_proto =
7032                                         ((const struct rte_flow_item_ipv6 *)
7033                                          items->spec)->hdr.proto;
7034                                 next_protocol =
7035                                         ((const struct rte_flow_item_ipv6 *)
7036                                          items->spec)->hdr.proto;
7037                                 next_protocol &=
7038                                         ((const struct rte_flow_item_ipv6 *)
7039                                          items->mask)->hdr.proto;
7040                         } else {
7041                                 /* Reset for inner layer. */
7042                                 next_protocol = 0xff;
7043                         }
7044                         break;
7045                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7046                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
7047                                                                   item_flags,
7048                                                                   error);
7049                         if (ret < 0)
7050                                 return ret;
7051                         last_item = tunnel ?
7052                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7053                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7054                         if (items->mask != NULL &&
7055                             ((const struct rte_flow_item_ipv6_frag_ext *)
7056                              items->mask)->hdr.next_header) {
7057                                 next_protocol =
7058                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7059                                  items->spec)->hdr.next_header;
7060                                 next_protocol &=
7061                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7062                                  items->mask)->hdr.next_header;
7063                         } else {
7064                                 /* Reset for inner layer. */
7065                                 next_protocol = 0xff;
7066                         }
7067                         break;
7068                 case RTE_FLOW_ITEM_TYPE_TCP:
7069                         ret = mlx5_flow_validate_item_tcp
7070                                                 (items, item_flags,
7071                                                  next_protocol,
7072                                                  &nic_tcp_mask,
7073                                                  error);
7074                         if (ret < 0)
7075                                 return ret;
7076                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7077                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7078                         break;
7079                 case RTE_FLOW_ITEM_TYPE_UDP:
7080                         ret = mlx5_flow_validate_item_udp(items, item_flags,
7081                                                           next_protocol,
7082                                                           error);
7083                         const struct rte_flow_item_udp *spec = items->spec;
7084                         const struct rte_flow_item_udp *mask = items->mask;
7085                         if (!mask)
7086                                 mask = &rte_flow_item_udp_mask;
7087                         if (spec != NULL)
7088                                 udp_dport = rte_be_to_cpu_16
7089                                                 (spec->hdr.dst_port &
7090                                                  mask->hdr.dst_port);
7091                         if (ret < 0)
7092                                 return ret;
7093                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7094                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7095                         break;
7096                 case RTE_FLOW_ITEM_TYPE_GRE:
7097                         ret = mlx5_flow_validate_item_gre(items, item_flags,
7098                                                           next_protocol, error);
7099                         if (ret < 0)
7100                                 return ret;
7101                         gre_item = items;
7102                         last_item = MLX5_FLOW_LAYER_GRE;
7103                         break;
7104                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7105                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7106                                                             next_protocol,
7107                                                             error);
7108                         if (ret < 0)
7109                                 return ret;
7110                         last_item = MLX5_FLOW_LAYER_NVGRE;
7111                         break;
7112                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7113                         ret = mlx5_flow_validate_item_gre_key
7114                                 (items, item_flags, gre_item, error);
7115                         if (ret < 0)
7116                                 return ret;
7117                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7118                         break;
7119                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7120                         ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
7121                                                             items, item_flags,
7122                                                             attr, error);
7123                         if (ret < 0)
7124                                 return ret;
7125                         last_item = MLX5_FLOW_LAYER_VXLAN;
7126                         break;
7127                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7128                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7129                                                                 item_flags, dev,
7130                                                                 error);
7131                         if (ret < 0)
7132                                 return ret;
7133                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7134                         break;
7135                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7136                         ret = mlx5_flow_validate_item_geneve(items,
7137                                                              item_flags, dev,
7138                                                              error);
7139                         if (ret < 0)
7140                                 return ret;
7141                         geneve_item = items;
7142                         last_item = MLX5_FLOW_LAYER_GENEVE;
7143                         break;
7144                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7145                         ret = mlx5_flow_validate_item_geneve_opt(items,
7146                                                                  last_item,
7147                                                                  geneve_item,
7148                                                                  dev,
7149                                                                  error);
7150                         if (ret < 0)
7151                                 return ret;
7152                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7153                         break;
7154                 case RTE_FLOW_ITEM_TYPE_MPLS:
7155                         ret = mlx5_flow_validate_item_mpls(dev, items,
7156                                                            item_flags,
7157                                                            last_item, error);
7158                         if (ret < 0)
7159                                 return ret;
7160                         last_item = MLX5_FLOW_LAYER_MPLS;
7161                         break;
7162
7163                 case RTE_FLOW_ITEM_TYPE_MARK:
7164                         ret = flow_dv_validate_item_mark(dev, items, attr,
7165                                                          error);
7166                         if (ret < 0)
7167                                 return ret;
7168                         last_item = MLX5_FLOW_ITEM_MARK;
7169                         break;
7170                 case RTE_FLOW_ITEM_TYPE_META:
7171                         ret = flow_dv_validate_item_meta(dev, items, attr,
7172                                                          error);
7173                         if (ret < 0)
7174                                 return ret;
7175                         last_item = MLX5_FLOW_ITEM_METADATA;
7176                         break;
7177                 case RTE_FLOW_ITEM_TYPE_ICMP:
7178                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7179                                                            next_protocol,
7180                                                            error);
7181                         if (ret < 0)
7182                                 return ret;
7183                         last_item = MLX5_FLOW_LAYER_ICMP;
7184                         break;
7185                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7186                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7187                                                             next_protocol,
7188                                                             error);
7189                         if (ret < 0)
7190                                 return ret;
7191                         item_ipv6_proto = IPPROTO_ICMPV6;
7192                         last_item = MLX5_FLOW_LAYER_ICMP6;
7193                         break;
7194                 case RTE_FLOW_ITEM_TYPE_TAG:
7195                         ret = flow_dv_validate_item_tag(dev, items,
7196                                                         attr, error);
7197                         if (ret < 0)
7198                                 return ret;
7199                         last_item = MLX5_FLOW_ITEM_TAG;
7200                         break;
7201                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7202                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7203                         break;
7204                 case RTE_FLOW_ITEM_TYPE_GTP:
7205                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7206                                                         error);
7207                         if (ret < 0)
7208                                 return ret;
7209                         gtp_item = items;
7210                         last_item = MLX5_FLOW_LAYER_GTP;
7211                         break;
7212                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7213                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7214                                                             gtp_item, attr,
7215                                                             error);
7216                         if (ret < 0)
7217                                 return ret;
7218                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7219                         break;
7220                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7221                         /* Capacity will be checked in the translate stage. */
7222                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7223                                                             last_item,
7224                                                             ether_type,
7225                                                             &nic_ecpri_mask,
7226                                                             error);
7227                         if (ret < 0)
7228                                 return ret;
7229                         last_item = MLX5_FLOW_LAYER_ECPRI;
7230                         break;
7231                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7232                         ret = flow_dv_validate_item_integrity(dev, items,
7233                                                               item_flags,
7234                                                               &last_item,
7235                                                               integrity_items,
7236                                                               error);
7237                         if (ret < 0)
7238                                 return ret;
7239                         break;
7240                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7241                         ret = flow_dv_validate_item_aso_ct(dev, items,
7242                                                            &item_flags, error);
7243                         if (ret < 0)
7244                                 return ret;
7245                         break;
7246                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7247                         /* The tunnel offload item was already processed
7248                          * earlier; list it here as a supported type.
7249                          */
7250                         break;
7251                 case RTE_FLOW_ITEM_TYPE_FLEX:
7252                         ret = flow_dv_validate_item_flex(dev, items, item_flags,
7253                                                          &last_item,
7254                                                          tunnel != 0, error);
7255                         if (ret < 0)
7256                                 return ret;
7257                         break;
7258                 default:
7259                         return rte_flow_error_set(error, ENOTSUP,
7260                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7261                                                   NULL, "item not supported");
7262                 }
7263                 item_flags |= last_item;
7264         }
7265         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
7266                 ret = flow_dv_validate_item_integrity_post(integrity_items,
7267                                                            item_flags, error);
7268                 if (ret)
7269                         return ret;
7270         }
7271         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7272                 int type = actions->type;
7273                 bool shared_count = false;
7274
7275                 if (!mlx5_flow_os_action_supported(type))
7276                         return rte_flow_error_set(error, ENOTSUP,
7277                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7278                                                   actions,
7279                                                   "action not supported");
7280                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7281                         return rte_flow_error_set(error, ENOTSUP,
7282                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7283                                                   actions, "too many actions");
7284                 if (action_flags &
7285                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7286                         return rte_flow_error_set(error, ENOTSUP,
7287                                 RTE_FLOW_ERROR_TYPE_ACTION,
7288                                 NULL, "meter action with policy "
7289                                 "must be the last action");
7290                 switch (type) {
7291                 case RTE_FLOW_ACTION_TYPE_VOID:
7292                         break;
7293                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7294                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7295                         ret = flow_dv_validate_action_port_id(dev,
7296                                                               action_flags,
7297                                                               actions,
7298                                                               attr,
7299                                                               error);
7300                         if (ret)
7301                                 return ret;
7302                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7303                         ++actions_n;
7304                         break;
7305                 case RTE_FLOW_ACTION_TYPE_FLAG:
7306                         ret = flow_dv_validate_action_flag(dev, action_flags,
7307                                                            attr, error);
7308                         if (ret < 0)
7309                                 return ret;
7310                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7311                                 /* Count all modify-header actions as one. */
7312                                 if (!(action_flags &
7313                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7314                                         ++actions_n;
7315                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7316                                                 MLX5_FLOW_ACTION_MARK_EXT;
7317                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7318                                         modify_after_mirror = 1;
7319
7320                         } else {
7321                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7322                                 ++actions_n;
7323                         }
7324                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7325                         break;
7326                 case RTE_FLOW_ACTION_TYPE_MARK:
7327                         ret = flow_dv_validate_action_mark(dev, actions,
7328                                                            action_flags,
7329                                                            attr, error);
7330                         if (ret < 0)
7331                                 return ret;
7332                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7333                                 /* Count all modify-header actions as one. */
7334                                 if (!(action_flags &
7335                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7336                                         ++actions_n;
7337                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7338                                                 MLX5_FLOW_ACTION_MARK_EXT;
7339                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7340                                         modify_after_mirror = 1;
7341                         } else {
7342                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7343                                 ++actions_n;
7344                         }
7345                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7346                         break;
7347                 case RTE_FLOW_ACTION_TYPE_SET_META:
7348                         ret = flow_dv_validate_action_set_meta(dev, actions,
7349                                                                action_flags,
7350                                                                attr, error);
7351                         if (ret < 0)
7352                                 return ret;
7353                         /* Count all modify-header actions as one action. */
7354                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7355                                 ++actions_n;
7356                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7357                                 modify_after_mirror = 1;
7358                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7359                         rw_act_num += MLX5_ACT_NUM_SET_META;
7360                         break;
7361                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7362                         ret = flow_dv_validate_action_set_tag(dev, actions,
7363                                                               action_flags,
7364                                                               attr, error);
7365                         if (ret < 0)
7366                                 return ret;
7367                         /* Count all modify-header actions as one action. */
7368                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7369                                 ++actions_n;
7370                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7371                                 modify_after_mirror = 1;
7372                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7373                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7374                         break;
7375                 case RTE_FLOW_ACTION_TYPE_DROP:
7376                         ret = mlx5_flow_validate_action_drop(action_flags,
7377                                                              attr, error);
7378                         if (ret < 0)
7379                                 return ret;
7380                         action_flags |= MLX5_FLOW_ACTION_DROP;
7381                         ++actions_n;
7382                         break;
7383                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7384                         ret = mlx5_flow_validate_action_queue(actions,
7385                                                               action_flags, dev,
7386                                                               attr, error);
7387                         if (ret < 0)
7388                                 return ret;
7389                         queue_index = ((const struct rte_flow_action_queue *)
7390                                                         (actions->conf))->index;
7391                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7392                         ++actions_n;
7393                         break;
7394                 case RTE_FLOW_ACTION_TYPE_RSS:
7395                         rss = actions->conf;
7396                         ret = mlx5_flow_validate_action_rss(actions,
7397                                                             action_flags, dev,
7398                                                             attr, item_flags,
7399                                                             error);
7400                         if (ret < 0)
7401                                 return ret;
7402                         if (rss && sample_rss &&
7403                             (sample_rss->level != rss->level ||
7404                             sample_rss->types != rss->types))
7405                                 return rte_flow_error_set(error, ENOTSUP,
7406                                         RTE_FLOW_ERROR_TYPE_ACTION,
7407                                         NULL,
7408                                         "Can't use the different RSS types "
7409                                         "or level in the same flow");
7410                         if (rss != NULL && rss->queue_num)
7411                                 queue_index = rss->queue[0];
7412                         action_flags |= MLX5_FLOW_ACTION_RSS;
7413                         ++actions_n;
7414                         break;
7415                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7416                         ret =
7417                         mlx5_flow_validate_action_default_miss(action_flags,
7418                                         attr, error);
7419                         if (ret < 0)
7420                                 return ret;
7421                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7422                         ++actions_n;
7423                         break;
7424                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7425                         shared_count = true;
7426                         /* fall-through. */
7427                 case RTE_FLOW_ACTION_TYPE_COUNT:
7428                         ret = flow_dv_validate_action_count(dev, shared_count,
7429                                                             action_flags,
7430                                                             error);
7431                         if (ret < 0)
7432                                 return ret;
7433                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7434                         ++actions_n;
7435                         break;
7436                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7437                         if (flow_dv_validate_action_pop_vlan(dev,
7438                                                              action_flags,
7439                                                              actions,
7440                                                              item_flags, attr,
7441                                                              error))
7442                                 return -rte_errno;
7443                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7444                                 modify_after_mirror = 1;
7445                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7446                         ++actions_n;
7447                         break;
7448                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7449                         ret = flow_dv_validate_action_push_vlan(dev,
7450                                                                 action_flags,
7451                                                                 vlan_m,
7452                                                                 actions, attr,
7453                                                                 error);
7454                         if (ret < 0)
7455                                 return ret;
7456                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7457                                 modify_after_mirror = 1;
7458                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7459                         ++actions_n;
7460                         break;
7461                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7462                         ret = flow_dv_validate_action_set_vlan_pcp
7463                                                 (action_flags, actions, error);
7464                         if (ret < 0)
7465                                 return ret;
7466                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7467                                 modify_after_mirror = 1;
7468                         /* Count PCP with push_vlan command. */
7469                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7470                         break;
7471                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7472                         ret = flow_dv_validate_action_set_vlan_vid
7473                                                 (item_flags, action_flags,
7474                                                  actions, error);
7475                         if (ret < 0)
7476                                 return ret;
7477                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7478                                 modify_after_mirror = 1;
7479                         /* Count VID with push_vlan command. */
7480                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7481                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7482                         break;
7483                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7484                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7485                         ret = flow_dv_validate_action_l2_encap(dev,
7486                                                                action_flags,
7487                                                                actions, attr,
7488                                                                error);
7489                         if (ret < 0)
7490                                 return ret;
7491                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7492                         ++actions_n;
7493                         break;
7494                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7495                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7496                         ret = flow_dv_validate_action_decap(dev, action_flags,
7497                                                             actions, item_flags,
7498                                                             attr, error);
7499                         if (ret < 0)
7500                                 return ret;
7501                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7502                                 modify_after_mirror = 1;
7503                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7504                         ++actions_n;
7505                         break;
7506                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7507                         ret = flow_dv_validate_action_raw_encap_decap
7508                                 (dev, NULL, actions->conf, attr, &action_flags,
7509                                  &actions_n, actions, item_flags, error);
7510                         if (ret < 0)
7511                                 return ret;
7512                         break;
7513                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7514                         decap = actions->conf;
7515                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7516                                 ;
7517                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7518                                 encap = NULL;
7519                                 actions--;
7520                         } else {
7521                                 encap = actions->conf;
7522                         }
7523                         ret = flow_dv_validate_action_raw_encap_decap
7524                                            (dev,
7525                                             decap ? decap : &empty_decap, encap,
7526                                             attr, &action_flags, &actions_n,
7527                                             actions, item_flags, error);
7528                         if (ret < 0)
7529                                 return ret;
7530                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7531                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7532                                 modify_after_mirror = 1;
7533                         break;
7534                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7535                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7536                         ret = flow_dv_validate_action_modify_mac(action_flags,
7537                                                                  actions,
7538                                                                  item_flags,
7539                                                                  error);
7540                         if (ret < 0)
7541                                 return ret;
7542                         /* Count all modify-header actions as one action. */
7543                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7544                                 ++actions_n;
7545                         action_flags |= actions->type ==
7546                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7547                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7548                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7549                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7550                                 modify_after_mirror = 1;
7551                         /*
7552                          * Even if the source and destination MAC addresses have
7553                          * overlap in the header with 4B alignment, the convert
7554                          * function will handle them separately and 4 SW actions
7555                          * will be created. And 2 actions will be added each
7556                          * time no matter how many bytes of address will be set.
7557                          */
7558                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7559                         break;
7560                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7561                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7562                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7563                                                                   actions,
7564                                                                   item_flags,
7565                                                                   error);
7566                         if (ret < 0)
7567                                 return ret;
7568                         /* Count all modify-header actions as one action. */
7569                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7570                                 ++actions_n;
7571                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7572                                 modify_after_mirror = 1;
7573                         action_flags |= actions->type ==
7574                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7575                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7576                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7577                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7578                         break;
7579                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7580                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7581                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7582                                                                   actions,
7583                                                                   item_flags,
7584                                                                   error);
7585                         if (ret < 0)
7586                                 return ret;
7587                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7588                                 return rte_flow_error_set(error, ENOTSUP,
7589                                         RTE_FLOW_ERROR_TYPE_ACTION,
7590                                         actions,
7591                                         "Can't change header "
7592                                         "with ICMPv6 proto");
7593                         /* Count all modify-header actions as one action. */
7594                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7595                                 ++actions_n;
7596                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7597                                 modify_after_mirror = 1;
7598                         action_flags |= actions->type ==
7599                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7600                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7601                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7602                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7603                         break;
7604                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7605                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7606                         ret = flow_dv_validate_action_modify_tp(action_flags,
7607                                                                 actions,
7608                                                                 item_flags,
7609                                                                 error);
7610                         if (ret < 0)
7611                                 return ret;
7612                         /* Count all modify-header actions as one action. */
7613                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7614                                 ++actions_n;
7615                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7616                                 modify_after_mirror = 1;
7617                         action_flags |= actions->type ==
7618                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7619                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7620                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7621                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7622                         break;
7623                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7624                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7625                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7626                                                                  actions,
7627                                                                  item_flags,
7628                                                                  error);
7629                         if (ret < 0)
7630                                 return ret;
7631                         /* Count all modify-header actions as one action. */
7632                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7633                                 ++actions_n;
7634                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7635                                 modify_after_mirror = 1;
7636                         action_flags |= actions->type ==
7637                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7638                                                 MLX5_FLOW_ACTION_SET_TTL :
7639                                                 MLX5_FLOW_ACTION_DEC_TTL;
7640                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7641                         break;
7642                 case RTE_FLOW_ACTION_TYPE_JUMP:
7643                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7644                                                            action_flags,
7645                                                            attr, external,
7646                                                            error);
7647                         if (ret)
7648                                 return ret;
7649                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7650                             fdb_mirror_limit)
7651                                 return rte_flow_error_set(error, EINVAL,
7652                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7653                                                   NULL,
7654                                                   "sample and jump action combination is not supported");
7655                         ++actions_n;
7656                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7657                         break;
7658                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7659                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7660                         ret = flow_dv_validate_action_modify_tcp_seq
7661                                                                 (action_flags,
7662                                                                  actions,
7663                                                                  item_flags,
7664                                                                  error);
7665                         if (ret < 0)
7666                                 return ret;
7667                         /* Count all modify-header actions as one action. */
7668                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7669                                 ++actions_n;
7670                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7671                                 modify_after_mirror = 1;
7672                         action_flags |= actions->type ==
7673                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7674                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7675                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7676                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7677                         break;
7678                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7679                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7680                         ret = flow_dv_validate_action_modify_tcp_ack
7681                                                                 (action_flags,
7682                                                                  actions,
7683                                                                  item_flags,
7684                                                                  error);
7685                         if (ret < 0)
7686                                 return ret;
7687                         /* Count all modify-header actions as one action. */
7688                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7689                                 ++actions_n;
7690                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7691                                 modify_after_mirror = 1;
7692                         action_flags |= actions->type ==
7693                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7694                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7695                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7696                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7697                         break;
7698                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7699                         break;
7700                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7701                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7702                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7703                         break;
7704                 case RTE_FLOW_ACTION_TYPE_METER:
7705                         ret = mlx5_flow_validate_action_meter(dev,
7706                                                               action_flags,
7707                                                               item_flags,
7708                                                               actions, attr,
7709                                                               port_id_item,
7710                                                               &def_policy,
7711                                                               error);
7712                         if (ret < 0)
7713                                 return ret;
7714                         action_flags |= MLX5_FLOW_ACTION_METER;
7715                         if (!def_policy)
7716                                 action_flags |=
7717                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7718                         ++actions_n;
7719                         /* Meter action will add one more TAG action. */
7720                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7721                         break;
7722                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7723                         if (!attr->transfer && !attr->group)
7724                                 return rte_flow_error_set(error, ENOTSUP,
7725                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7726                                                                            NULL,
7727                           "Shared ASO age action is not supported for group 0");
7728                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7729                                 return rte_flow_error_set
7730                                                   (error, EINVAL,
7731                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7732                                                    NULL,
7733                                                    "duplicate age actions set");
7734                         action_flags |= MLX5_FLOW_ACTION_AGE;
7735                         ++actions_n;
7736                         break;
7737                 case RTE_FLOW_ACTION_TYPE_AGE:
7738                         ret = flow_dv_validate_action_age(action_flags,
7739                                                           actions, dev,
7740                                                           error);
7741                         if (ret < 0)
7742                                 return ret;
7743                         /*
7744                          * Validate the regular AGE action (using counter)
7745                          * mutual exclusion with share counter actions.
7746                          */
7747                         if (!priv->sh->flow_hit_aso_en) {
7748                                 if (shared_count)
7749                                         return rte_flow_error_set
7750                                                 (error, EINVAL,
7751                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7752                                                 NULL,
7753                                                 "old age and shared count combination is not supported");
7754                                 if (sample_count)
7755                                         return rte_flow_error_set
7756                                                 (error, EINVAL,
7757                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7758                                                 NULL,
7759                                                 "old age action and count must be in the same sub flow");
7760                         }
7761                         action_flags |= MLX5_FLOW_ACTION_AGE;
7762                         ++actions_n;
7763                         break;
7764                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7765                         ret = flow_dv_validate_action_modify_ipv4_dscp
7766                                                          (action_flags,
7767                                                           actions,
7768                                                           item_flags,
7769                                                           error);
7770                         if (ret < 0)
7771                                 return ret;
7772                         /* Count all modify-header actions as one action. */
7773                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7774                                 ++actions_n;
7775                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7776                                 modify_after_mirror = 1;
7777                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7778                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7779                         break;
7780                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7781                         ret = flow_dv_validate_action_modify_ipv6_dscp
7782                                                                 (action_flags,
7783                                                                  actions,
7784                                                                  item_flags,
7785                                                                  error);
7786                         if (ret < 0)
7787                                 return ret;
7788                         /* Count all modify-header actions as one action. */
7789                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7790                                 ++actions_n;
7791                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7792                                 modify_after_mirror = 1;
7793                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7794                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7795                         break;
7796                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7797                         ret = flow_dv_validate_action_sample(&action_flags,
7798                                                              actions, dev,
7799                                                              attr, item_flags,
7800                                                              rss, &sample_rss,
7801                                                              &sample_count,
7802                                                              &fdb_mirror_limit,
7803                                                              error);
7804                         if (ret < 0)
7805                                 return ret;
7806                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7807                         ++actions_n;
7808                         break;
7809                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7810                         ret = flow_dv_validate_action_modify_field(dev,
7811                                                                    action_flags,
7812                                                                    actions,
7813                                                                    attr,
7814                                                                    error);
7815                         if (ret < 0)
7816                                 return ret;
7817                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7818                                 modify_after_mirror = 1;
7819                         /* Count all modify-header actions as one action. */
7820                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7821                                 ++actions_n;
7822                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7823                         rw_act_num += ret;
7824                         break;
7825                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7826                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7827                                                              item_flags, attr,
7828                                                              error);
7829                         if (ret < 0)
7830                                 return ret;
7831                         action_flags |= MLX5_FLOW_ACTION_CT;
7832                         break;
7833                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7834                         /* tunnel offload action was processed before
7835                          * list it here as a supported type
7836                          */
7837                         break;
7838                 default:
7839                         return rte_flow_error_set(error, ENOTSUP,
7840                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7841                                                   actions,
7842                                                   "action not supported");
7843                 }
7844         }
7845         /*
7846          * Validate actions in flow rules
7847          * - Explicit decap action is prohibited by the tunnel offload API.
7848          * - Drop action in tunnel steer rule is prohibited by the API.
7849          * - Application cannot use MARK action because it's value can mask
7850          *   tunnel default miss notification.
7851          * - JUMP in tunnel match rule has no support in current PMD
7852          *   implementation.
7853          * - TAG & META are reserved for future uses.
7854          */
7855         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7856                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7857                                             MLX5_FLOW_ACTION_MARK     |
7858                                             MLX5_FLOW_ACTION_SET_TAG  |
7859                                             MLX5_FLOW_ACTION_SET_META |
7860                                             MLX5_FLOW_ACTION_DROP;
7861
7862                 if (action_flags & bad_actions_mask)
7863                         return rte_flow_error_set
7864                                         (error, EINVAL,
7865                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7866                                         "Invalid RTE action in tunnel "
7867                                         "set decap rule");
7868                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7869                         return rte_flow_error_set
7870                                         (error, EINVAL,
7871                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7872                                         "tunnel set decap rule must terminate "
7873                                         "with JUMP");
7874                 if (!attr->ingress)
7875                         return rte_flow_error_set
7876                                         (error, EINVAL,
7877                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7878                                         "tunnel flows for ingress traffic only");
7879         }
7880         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7881                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7882                                             MLX5_FLOW_ACTION_MARK    |
7883                                             MLX5_FLOW_ACTION_SET_TAG |
7884                                             MLX5_FLOW_ACTION_SET_META;
7885
7886                 if (action_flags & bad_actions_mask)
7887                         return rte_flow_error_set
7888                                         (error, EINVAL,
7889                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7890                                         "Invalid RTE action in tunnel "
7891                                         "set match rule");
7892         }
7893         /*
7894          * Validate the drop action mutual exclusion with other actions.
7895          * Drop action is mutually-exclusive with any other action, except for
7896          * Count action.
7897          * Drop action compatibility with tunnel offload was already validated.
7898          */
7899         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7900                             MLX5_FLOW_ACTION_TUNNEL_MATCH));
7901         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7902             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7903                 return rte_flow_error_set(error, EINVAL,
7904                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7905                                           "Drop action is mutually-exclusive "
7906                                           "with any other action, except for "
7907                                           "Count action");
7908         /* Eswitch has few restrictions on using items and actions */
7909         if (attr->transfer) {
7910                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7911                     action_flags & MLX5_FLOW_ACTION_FLAG)
7912                         return rte_flow_error_set(error, ENOTSUP,
7913                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7914                                                   NULL,
7915                                                   "unsupported action FLAG");
7916                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7917                     action_flags & MLX5_FLOW_ACTION_MARK)
7918                         return rte_flow_error_set(error, ENOTSUP,
7919                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7920                                                   NULL,
7921                                                   "unsupported action MARK");
7922                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7923                         return rte_flow_error_set(error, ENOTSUP,
7924                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7925                                                   NULL,
7926                                                   "unsupported action QUEUE");
7927                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7928                         return rte_flow_error_set(error, ENOTSUP,
7929                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7930                                                   NULL,
7931                                                   "unsupported action RSS");
7932                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7933                         return rte_flow_error_set(error, EINVAL,
7934                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7935                                                   actions,
7936                                                   "no fate action is found");
7937         } else {
7938                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7939                         return rte_flow_error_set(error, EINVAL,
7940                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7941                                                   actions,
7942                                                   "no fate action is found");
7943         }
7944         /*
7945          * Continue validation for Xcap and VLAN actions.
7946          * If hairpin is working in explicit TX rule mode, there is no actions
7947          * splitting and the validation of hairpin ingress flow should be the
7948          * same as other standard flows.
7949          */
7950         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7951                              MLX5_FLOW_VLAN_ACTIONS)) &&
7952             (queue_index == 0xFFFF ||
7953              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7954              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7955              conf->tx_explicit != 0))) {
7956                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7957                     MLX5_FLOW_XCAP_ACTIONS)
7958                         return rte_flow_error_set(error, ENOTSUP,
7959                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7960                                                   NULL, "encap and decap "
7961                                                   "combination aren't supported");
7962                 if (!attr->transfer && attr->ingress) {
7963                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7964                                 return rte_flow_error_set
7965                                                 (error, ENOTSUP,
7966                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7967                                                  NULL, "encap is not supported"
7968                                                  " for ingress traffic");
7969                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7970                                 return rte_flow_error_set
7971                                                 (error, ENOTSUP,
7972                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7973                                                  NULL, "push VLAN action not "
7974                                                  "supported for ingress");
7975                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7976                                         MLX5_FLOW_VLAN_ACTIONS)
7977                                 return rte_flow_error_set
7978                                                 (error, ENOTSUP,
7979                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7980                                                  NULL, "no support for "
7981                                                  "multiple VLAN actions");
7982                 }
7983         }
7984         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7985                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7986                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7987                         attr->ingress)
7988                         return rte_flow_error_set
7989                                 (error, ENOTSUP,
7990                                 RTE_FLOW_ERROR_TYPE_ACTION,
7991                                 NULL, "fate action not supported for "
7992                                 "meter with policy");
7993                 if (attr->egress) {
7994                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7995                                 return rte_flow_error_set
7996                                         (error, ENOTSUP,
7997                                         RTE_FLOW_ERROR_TYPE_ACTION,
7998                                         NULL, "modify header action in egress "
7999                                         "cannot be done before meter action");
8000                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
8001                                 return rte_flow_error_set
8002                                         (error, ENOTSUP,
8003                                         RTE_FLOW_ERROR_TYPE_ACTION,
8004                                         NULL, "encap action in egress "
8005                                         "cannot be done before meter action");
8006                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8007                                 return rte_flow_error_set
8008                                         (error, ENOTSUP,
8009                                         RTE_FLOW_ERROR_TYPE_ACTION,
8010                                         NULL, "push vlan action in egress "
8011                                         "cannot be done before meter action");
8012                 }
8013         }
8014         /*
8015          * Hairpin flow will add one more TAG action in TX implicit mode.
8016          * In TX explicit mode, there will be no hairpin flow ID.
8017          */
8018         if (hairpin > 0)
8019                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8020         /* extra metadata enabled: one more TAG action will be add. */
8021         if (dev_conf->dv_flow_en &&
8022             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
8023             mlx5_flow_ext_mreg_supported(dev))
8024                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
8025         if (rw_act_num >
8026                         flow_dv_modify_hdr_action_max(dev, is_root)) {
8027                 return rte_flow_error_set(error, ENOTSUP,
8028                                           RTE_FLOW_ERROR_TYPE_ACTION,
8029                                           NULL, "too many header modify"
8030                                           " actions to support");
8031         }
8032         /* Eswitch egress mirror and modify flow has limitation on CX5 */
8033         if (fdb_mirror_limit && modify_after_mirror)
8034                 return rte_flow_error_set(error, EINVAL,
8035                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8036                                 "sample before modify action is not supported");
8037         return 0;
8038 }
8039
8040 /**
8041  * Internal preparation function. Allocates the DV flow size,
8042  * this size is constant.
8043  *
8044  * @param[in] dev
8045  *   Pointer to the rte_eth_dev structure.
8046  * @param[in] attr
8047  *   Pointer to the flow attributes.
8048  * @param[in] items
8049  *   Pointer to the list of items.
8050  * @param[in] actions
8051  *   Pointer to the list of actions.
8052  * @param[out] error
8053  *   Pointer to the error structure.
8054  *
8055  * @return
8056  *   Pointer to mlx5_flow object on success,
8057  *   otherwise NULL and rte_errno is set.
8058  */
8059 static struct mlx5_flow *
8060 flow_dv_prepare(struct rte_eth_dev *dev,
8061                 const struct rte_flow_attr *attr __rte_unused,
8062                 const struct rte_flow_item items[] __rte_unused,
8063                 const struct rte_flow_action actions[] __rte_unused,
8064                 struct rte_flow_error *error)
8065 {
8066         uint32_t handle_idx = 0;
8067         struct mlx5_flow *dev_flow;
8068         struct mlx5_flow_handle *dev_handle;
8069         struct mlx5_priv *priv = dev->data->dev_private;
8070         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8071
8072         MLX5_ASSERT(wks);
8073         wks->skip_matcher_reg = 0;
8074         wks->policy = NULL;
8075         wks->final_policy = NULL;
8076         /* In case of corrupting the memory. */
8077         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8078                 rte_flow_error_set(error, ENOSPC,
8079                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8080                                    "not free temporary device flow");
8081                 return NULL;
8082         }
8083         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8084                                    &handle_idx);
8085         if (!dev_handle) {
8086                 rte_flow_error_set(error, ENOMEM,
8087                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8088                                    "not enough memory to create flow handle");
8089                 return NULL;
8090         }
8091         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8092         dev_flow = &wks->flows[wks->flow_idx++];
8093         memset(dev_flow, 0, sizeof(*dev_flow));
8094         dev_flow->handle = dev_handle;
8095         dev_flow->handle_idx = handle_idx;
8096         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8097         dev_flow->ingress = attr->ingress;
8098         dev_flow->dv.transfer = attr->transfer;
8099         return dev_flow;
8100 }
8101
#ifdef RTE_LIBRTE_MLX5_DEBUG
/**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * kernel driver. If unmasked bit is present in value, it returns failure.
 *
 * @param match_mask
 *   pointer to match mask buffer.
 * @param match_value
 *   pointer to match value buffer.
 *
 * @return
 *   0 if valid, -EINVAL otherwise.
 */
static int
flow_dv_check_valid_spec(void *match_mask, void *match_value)
{
	const uint8_t *mask = match_mask;
	const uint8_t *value = match_value;
	const unsigned int size = MLX5_ST_SZ_BYTES(fte_match_param);
	unsigned int idx;

	for (idx = 0; idx < size; ++idx) {
		/* A value bit set outside its mask makes the spec invalid. */
		if ((value[idx] & ~mask[idx]) == 0)
			continue;
		DRV_LOG(ERR,
			"match_value differs from match_criteria"
			" %p[%u] != %p[%u]",
			match_value, idx, match_mask, idx);
		return -EINVAL;
	}
	return 0;
}
#endif
8134
8135 /**
8136  * Add match of ip_version.
8137  *
8138  * @param[in] group
8139  *   Flow group.
8140  * @param[in] headers_v
8141  *   Values header pointer.
8142  * @param[in] headers_m
8143  *   Masks header pointer.
8144  * @param[in] ip_version
8145  *   The IP version to set.
8146  */
8147 static inline void
8148 flow_dv_set_match_ip_version(uint32_t group,
8149                              void *headers_v,
8150                              void *headers_m,
8151                              uint8_t ip_version)
8152 {
8153         if (group == 0)
8154                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8155         else
8156                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8157                          ip_version);
8158         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8159         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8160         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8161 }
8162
8163 /**
8164  * Add Ethernet item to matcher and to the value.
8165  *
8166  * @param[in, out] matcher
8167  *   Flow matcher.
8168  * @param[in, out] key
8169  *   Flow matcher value.
8170  * @param[in] item
8171  *   Flow pattern to translate.
8172  * @param[in] inner
8173  *   Item is inner pattern.
8174  */
8175 static void
8176 flow_dv_translate_item_eth(void *matcher, void *key,
8177                            const struct rte_flow_item *item, int inner,
8178                            uint32_t group)
8179 {
8180         const struct rte_flow_item_eth *eth_m = item->mask;
8181         const struct rte_flow_item_eth *eth_v = item->spec;
8182         const struct rte_flow_item_eth nic_mask = {
8183                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8184                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8185                 .type = RTE_BE16(0xffff),
8186                 .has_vlan = 0,
8187         };
8188         void *hdrs_m;
8189         void *hdrs_v;
8190         char *l24_v;
8191         unsigned int i;
8192
8193         if (!eth_v)
8194                 return;
8195         if (!eth_m)
8196                 eth_m = &nic_mask;
8197         if (inner) {
8198                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8199                                          inner_headers);
8200                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8201         } else {
8202                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8203                                          outer_headers);
8204                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8205         }
8206         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8207                &eth_m->dst, sizeof(eth_m->dst));
8208         /* The value must be in the range of the mask. */
8209         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8210         for (i = 0; i < sizeof(eth_m->dst); ++i)
8211                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8212         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8213                &eth_m->src, sizeof(eth_m->src));
8214         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8215         /* The value must be in the range of the mask. */
8216         for (i = 0; i < sizeof(eth_m->dst); ++i)
8217                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8218         /*
8219          * HW supports match on one Ethertype, the Ethertype following the last
8220          * VLAN tag of the packet (see PRM).
8221          * Set match on ethertype only if ETH header is not followed by VLAN.
8222          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8223          * ethertype, and use ip_version field instead.
8224          * eCPRI over Ether layer will use type value 0xAEFE.
8225          */
8226         if (eth_m->type == 0xFFFF) {
8227                 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
8228                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8229                 switch (eth_v->type) {
8230                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8231                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8232                         return;
8233                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8234                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8235                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8236                         return;
8237                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8238                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8239                         return;
8240                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8241                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8242                         return;
8243                 default:
8244                         break;
8245                 }
8246         }
8247         if (eth_m->has_vlan) {
8248                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8249                 if (eth_v->has_vlan) {
8250                         /*
8251                          * Here, when also has_more_vlan field in VLAN item is
8252                          * not set, only single-tagged packets will be matched.
8253                          */
8254                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8255                         return;
8256                 }
8257         }
8258         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8259                  rte_be_to_cpu_16(eth_m->type));
8260         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8261         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8262 }
8263
/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] dev_flow
 *   Flow descriptor.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
			    void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	void *hdrs_m;
	void *hdrs_v;
	uint16_t tci_m;
	uint16_t tci_v;

	if (inner) {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
		/*
		 * This is workaround, masks are not supported,
		 * and pre-validated.
		 */
		/* Save VID (low 12 TCI bits) in the handle for outer VLAN. */
		if (vlan_v)
			dev_flow->handle->vf_vlan.tag =
					rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
	}
	/*
	 * When VLAN item exists in flow, mark packet as tagged,
	 * even if TCI is not specified.
	 */
	/* Skip if svlan_tag was already set (e.g. by a preceding ETH item). */
	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
	}
	if (!vlan_v)
		return;
	if (!vlan_m)
		vlan_m = &rte_flow_item_vlan_mask;
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	/* Value is pre-masked so it never exceeds the mask. */
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	/* TCI layout: PCP in bits 15:13, CFI in bit 12, VID in bits 11:0. */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
	/*
	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
	 * ethertype, and use ip_version field instead.
	 */
	if (vlan_m->inner_type == 0xFFFF) {
		switch (vlan_v->inner_type) {
		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
			/* Inner VLAN follows: this tag is the S-VLAN. */
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
			return;
		default:
			break;
		}
	}
	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
		/* Only one vlan_tag bit can be set. */
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
		return;
	}
	/* Otherwise match the inner ethertype directly (masked value). */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
8360
8361 /**
8362  * Add IPV4 item to matcher and to the value.
8363  *
8364  * @param[in, out] matcher
8365  *   Flow matcher.
8366  * @param[in, out] key
8367  *   Flow matcher value.
8368  * @param[in] item
8369  *   Flow pattern to translate.
8370  * @param[in] inner
8371  *   Item is inner pattern.
8372  * @param[in] group
8373  *   The group to insert the rule.
8374  */
8375 static void
8376 flow_dv_translate_item_ipv4(void *matcher, void *key,
8377                             const struct rte_flow_item *item,
8378                             int inner, uint32_t group)
8379 {
8380         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8381         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8382         const struct rte_flow_item_ipv4 nic_mask = {
8383                 .hdr = {
8384                         .src_addr = RTE_BE32(0xffffffff),
8385                         .dst_addr = RTE_BE32(0xffffffff),
8386                         .type_of_service = 0xff,
8387                         .next_proto_id = 0xff,
8388                         .time_to_live = 0xff,
8389                 },
8390         };
8391         void *headers_m;
8392         void *headers_v;
8393         char *l24_m;
8394         char *l24_v;
8395         uint8_t tos, ihl_m, ihl_v;
8396
8397         if (inner) {
8398                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8399                                          inner_headers);
8400                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8401         } else {
8402                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8403                                          outer_headers);
8404                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8405         }
8406         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8407         if (!ipv4_v)
8408                 return;
8409         if (!ipv4_m)
8410                 ipv4_m = &nic_mask;
8411         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8412                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8413         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8414                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8415         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8416         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8417         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8418                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8419         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8420                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8421         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8422         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8423         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8424         ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8425         ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8426         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8427         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
8428         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8429                  ipv4_m->hdr.type_of_service);
8430         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8431         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8432                  ipv4_m->hdr.type_of_service >> 2);
8433         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8434         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8435                  ipv4_m->hdr.next_proto_id);
8436         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8437                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8438         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8439                  ipv4_m->hdr.time_to_live);
8440         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8441                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8442         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8443                  !!(ipv4_m->hdr.fragment_offset));
8444         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8445                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8446 }
8447
8448 /**
8449  * Add IPV6 item to matcher and to the value.
8450  *
8451  * @param[in, out] matcher
8452  *   Flow matcher.
8453  * @param[in, out] key
8454  *   Flow matcher value.
8455  * @param[in] item
8456  *   Flow pattern to translate.
8457  * @param[in] inner
8458  *   Item is inner pattern.
8459  * @param[in] group
8460  *   The group to insert the rule.
8461  */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Widest supported mask, used when the item carries no mask. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	/* Select the inner or outer header section of the match parameter. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Always mark the packet as IPv6, even without spec/mask. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	/* Destination address: copy mask, AND spec with mask into value. */
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	/* Source address, same masking scheme. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	/*
	 * Traffic class is bits 27..20 of vtc_flow in CPU order;
	 * MLX5_SET() truncates to field width (2 bits ECN, 6 bits DSCP).
	 */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
	/* Hop limit. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv6_m->hdr.hop_limits);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
	/* Fragment flag is driven by the frag-extension indicator bit. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv6_m->has_frag_ext));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
}
8556
8557 /**
8558  * Add IPV6 fragment extension item to matcher and to the value.
8559  *
8560  * @param[in, out] matcher
8561  *   Flow matcher.
8562  * @param[in, out] key
8563  *   Flow matcher value.
8564  * @param[in] item
8565  *   Flow pattern to translate.
8566  * @param[in] inner
8567  *   Item is inner pattern.
8568  */
8569 static void
8570 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8571                                      const struct rte_flow_item *item,
8572                                      int inner)
8573 {
8574         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8575         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8576         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8577                 .hdr = {
8578                         .next_header = 0xff,
8579                         .frag_data = RTE_BE16(0xffff),
8580                 },
8581         };
8582         void *headers_m;
8583         void *headers_v;
8584
8585         if (inner) {
8586                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8587                                          inner_headers);
8588                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8589         } else {
8590                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8591                                          outer_headers);
8592                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8593         }
8594         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8595         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8596         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8597         if (!ipv6_frag_ext_v)
8598                 return;
8599         if (!ipv6_frag_ext_m)
8600                 ipv6_frag_ext_m = &nic_mask;
8601         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8602                  ipv6_frag_ext_m->hdr.next_header);
8603         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8604                  ipv6_frag_ext_v->hdr.next_header &
8605                  ipv6_frag_ext_m->hdr.next_header);
8606 }
8607
8608 /**
8609  * Add TCP item to matcher and to the value.
8610  *
8611  * @param[in, out] matcher
8612  *   Flow matcher.
8613  * @param[in, out] key
8614  *   Flow matcher value.
8615  * @param[in] item
8616  *   Flow pattern to translate.
8617  * @param[in] inner
8618  *   Item is inner pattern.
8619  */
8620 static void
8621 flow_dv_translate_item_tcp(void *matcher, void *key,
8622                            const struct rte_flow_item *item,
8623                            int inner)
8624 {
8625         const struct rte_flow_item_tcp *tcp_m = item->mask;
8626         const struct rte_flow_item_tcp *tcp_v = item->spec;
8627         void *headers_m;
8628         void *headers_v;
8629
8630         if (inner) {
8631                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8632                                          inner_headers);
8633                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8634         } else {
8635                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8636                                          outer_headers);
8637                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8638         }
8639         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8640         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8641         if (!tcp_v)
8642                 return;
8643         if (!tcp_m)
8644                 tcp_m = &rte_flow_item_tcp_mask;
8645         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8646                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8647         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8648                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8649         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8650                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8651         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8652                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8653         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8654                  tcp_m->hdr.tcp_flags);
8655         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8656                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8657 }
8658
8659 /**
8660  * Add UDP item to matcher and to the value.
8661  *
8662  * @param[in, out] matcher
8663  *   Flow matcher.
8664  * @param[in, out] key
8665  *   Flow matcher value.
8666  * @param[in] item
8667  *   Flow pattern to translate.
8668  * @param[in] inner
8669  *   Item is inner pattern.
8670  */
8671 static void
8672 flow_dv_translate_item_udp(void *matcher, void *key,
8673                            const struct rte_flow_item *item,
8674                            int inner)
8675 {
8676         const struct rte_flow_item_udp *udp_m = item->mask;
8677         const struct rte_flow_item_udp *udp_v = item->spec;
8678         void *headers_m;
8679         void *headers_v;
8680
8681         if (inner) {
8682                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8683                                          inner_headers);
8684                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8685         } else {
8686                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8687                                          outer_headers);
8688                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8689         }
8690         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8691         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8692         if (!udp_v)
8693                 return;
8694         if (!udp_m)
8695                 udp_m = &rte_flow_item_udp_mask;
8696         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8697                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8698         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8699                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8700         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8701                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8702         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8703                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8704 }
8705
/**
 * Add GRE optional Key item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
8718 static void
8719 flow_dv_translate_item_gre_key(void *matcher, void *key,
8720                                    const struct rte_flow_item *item)
8721 {
8722         const rte_be32_t *key_m = item->mask;
8723         const rte_be32_t *key_v = item->spec;
8724         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8725         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8726         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8727
8728         /* GRE K bit must be on and should already be validated */
8729         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8730         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8731         if (!key_v)
8732                 return;
8733         if (!key_m)
8734                 key_m = &gre_key_default_mask;
8735         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8736                  rte_be_to_cpu_32(*key_m) >> 8);
8737         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8738                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8739         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8740                  rte_be_to_cpu_32(*key_m) & 0xFF);
8741         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8742                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8743 }
8744
8745 /**
8746  * Add GRE item to matcher and to the value.
8747  *
8748  * @param[in, out] matcher
8749  *   Flow matcher.
8750  * @param[in, out] key
8751  *   Flow matcher value.
8752  * @param[in] item
8753  *   Flow pattern to translate.
8754  * @param[in] pattern_flags
8755  *   Accumulated pattern flags.
8756  */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   uint64_t pattern_flags)
{
	static const struct rte_flow_item_gre empty_gre = {0,};
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Overlay for picking the C/K/S presence bits out of the GRE
	 * c_rsvd0_ver word after conversion to CPU order.
	 * NOTE(review): relies on the host ABI bit-field layout matching
	 * this ordering — presumably validated on supported platforms.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
	uint16_t protocol_m, protocol_v;

	/* GRE rides directly over IP; pin the IP protocol unconditionally. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v) {
		/* No spec: match the GRE header fields as all-zero. */
		gre_v = &empty_gre;
		gre_m = &empty_gre;
	} else {
		if (!gre_m)
			gre_m = &rte_flow_item_gre_mask;
	}
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	/* Match each presence flag, spec ANDed with its mask bit. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
	protocol_m = rte_be_to_cpu_16(gre_m->protocol);
	protocol_v = rte_be_to_cpu_16(gre_v->protocol);
	if (!protocol_m) {
		/* Force next protocol to prevent matchers duplication */
		protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
		if (protocol_v)
			protocol_m = 0xFFFF;
	}
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 protocol_m & protocol_v);
}
8823
8824 /**
8825  * Add NVGRE item to matcher and to the value.
8826  *
8827  * @param[in, out] matcher
8828  *   Flow matcher.
8829  * @param[in, out] key
8830  *   Flow matcher value.
8831  * @param[in] item
8832  *   Flow pattern to translate.
8833  * @param[in] pattern_flags
8834  *   Accumulated pattern flags.
8835  */
8836 static void
8837 flow_dv_translate_item_nvgre(void *matcher, void *key,
8838                              const struct rte_flow_item *item,
8839                              unsigned long pattern_flags)
8840 {
8841         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8842         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8843         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8844         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8845         const char *tni_flow_id_m;
8846         const char *tni_flow_id_v;
8847         char *gre_key_m;
8848         char *gre_key_v;
8849         int size;
8850         int i;
8851
8852         /* For NVGRE, GRE header fields must be set with defined values. */
8853         const struct rte_flow_item_gre gre_spec = {
8854                 .c_rsvd0_ver = RTE_BE16(0x2000),
8855                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8856         };
8857         const struct rte_flow_item_gre gre_mask = {
8858                 .c_rsvd0_ver = RTE_BE16(0xB000),
8859                 .protocol = RTE_BE16(UINT16_MAX),
8860         };
8861         const struct rte_flow_item gre_item = {
8862                 .spec = &gre_spec,
8863                 .mask = &gre_mask,
8864                 .last = NULL,
8865         };
8866         flow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);
8867         if (!nvgre_v)
8868                 return;
8869         if (!nvgre_m)
8870                 nvgre_m = &rte_flow_item_nvgre_mask;
8871         tni_flow_id_m = (const char *)nvgre_m->tni;
8872         tni_flow_id_v = (const char *)nvgre_v->tni;
8873         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8874         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8875         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8876         memcpy(gre_key_m, tni_flow_id_m, size);
8877         for (i = 0; i < size; ++i)
8878                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8879 }
8880
8881 /**
8882  * Add VXLAN item to matcher and to the value.
8883  *
8884  * @param[in] dev
8885  *   Pointer to the Ethernet device structure.
8886  * @param[in] attr
8887  *   Flow rule attributes.
8888  * @param[in, out] matcher
8889  *   Flow matcher.
8890  * @param[in, out] key
8891  *   Flow matcher value.
8892  * @param[in] item
8893  *   Flow pattern to translate.
8894  * @param[in] inner
8895  *   Item is inner pattern.
8896  */
static void
flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc5_m;
	void *misc5_v;
	uint32_t *tunnel_header_v;
	uint32_t *tunnel_header_m;
	uint16_t dport;
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Extended default mask: also covers the reserved byte (rsvd1). */
	const struct rte_flow_item_vxlan nic_mask = {
		.vni = "\xff\xff\xff",
		.rsvd1 = 0xff,
	};

	/* Select the inner or outer header section of the match parameter. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	/* Default the UDP destination port only when not already matched. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	/* Re-read: an earlier UDP item may have set an explicit port. */
	dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
	if (!vxlan_v)
		return;
	if (!vxlan_m) {
		/*
		 * Pick the default mask matching the path below: the plain
		 * VNI-only mask when only legacy misc matching is available,
		 * otherwise the extended mask including rsvd1.
		 */
		if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
		    (attr->group && !priv->sh->misc5_cap))
			vxlan_m = &rte_flow_item_vxlan_mask;
		else
			vxlan_m = &nic_mask;
	}
	/*
	 * Legacy path: match the VNI through misc.vxlan_vni when the
	 * device lacks misc5/tunnel-header capability for this domain, or
	 * on ConnectX-5 steering with a non-default VXLAN UDP port.
	 */
	if ((priv->sh->steering_format_version ==
	    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
	    dport != MLX5_UDP_PORT_VXLAN) ||
	    (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
	    ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
		void *misc_m;
		void *misc_v;
		char *vni_m;
		char *vni_v;
		int size;
		int i;
		misc_m = MLX5_ADDR_OF(fte_match_param,
				      matcher, misc_parameters);
		misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
		size = sizeof(vxlan_m->vni);
		vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
		vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
		memcpy(vni_m, vxlan_m->vni, size);
		for (i = 0; i < size; ++i)
			vni_v[i] = vni_m[i] & vxlan_v->vni[i];
		return;
	}
	/* misc5 path: match VNI and rsvd1 via tunnel_header_1. */
	misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
	misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
	tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
						   misc5_v,
						   tunnel_header_1);
	tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
						   misc5_m,
						   tunnel_header_1);
	/* VNI occupies the three low bytes of tunnel_header_1. */
	*tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
			   (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
			   (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
	if (*tunnel_header_v)
		*tunnel_header_m = vxlan_m->vni[0] |
			vxlan_m->vni[1] << 8 |
			vxlan_m->vni[2] << 16;
	else
		*tunnel_header_m = 0x0;
	/* The reserved byte rsvd1 maps to the top byte of tunnel_header_1. */
	*tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
	if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
		*tunnel_header_m |= vxlan_m->rsvd1 << 24;
}
8987
/**
 * Add VXLAN-GPE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] pattern_flags
 *   Accumulated pattern flags.
 */
9000
9001 static void
9002 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
9003                                  const struct rte_flow_item *item,
9004                                  const uint64_t pattern_flags)
9005 {
9006         static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
9007         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
9008         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
9009         /* The item was validated to be on the outer side */
9010         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9011         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9012         void *misc_m =
9013                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
9014         void *misc_v =
9015                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9016         char *vni_m =
9017                 MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9018         char *vni_v =
9019                 MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9020         int i, size = sizeof(vxlan_m->vni);
9021         uint8_t flags_m = 0xff;
9022         uint8_t flags_v = 0xc;
9023         uint8_t m_protocol, v_protocol;
9024
9025         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9026                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9027                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9028                          MLX5_UDP_PORT_VXLAN_GPE);
9029         }
9030         if (!vxlan_v) {
9031                 vxlan_v = &dummy_vxlan_gpe_hdr;
9032                 vxlan_m = &dummy_vxlan_gpe_hdr;
9033         } else {
9034                 if (!vxlan_m)
9035                         vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9036         }
9037         memcpy(vni_m, vxlan_m->vni, size);
9038         for (i = 0; i < size; ++i)
9039                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9040         if (vxlan_m->flags) {
9041                 flags_m = vxlan_m->flags;
9042                 flags_v = vxlan_v->flags;
9043         }
9044         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9045         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9046         m_protocol = vxlan_m->protocol;
9047         v_protocol = vxlan_v->protocol;
9048         if (!m_protocol) {
9049                 /* Force next protocol to ensure next headers parsing. */
9050                 if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
9051                         v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
9052                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
9053                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
9054                 else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
9055                         v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
9056                 if (v_protocol)
9057                         m_protocol = 0xFF;
9058         }
9059         MLX5_SET(fte_match_set_misc3, misc_m,
9060                  outer_vxlan_gpe_next_protocol, m_protocol);
9061         MLX5_SET(fte_match_set_misc3, misc_v,
9062                  outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
9063 }
9064
/**
 * Add Geneve item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] pattern_flags
 *   Accumulated pattern flags.
 */
9077
static void
flow_dv_translate_item_geneve(void *matcher, void *key,
			      const struct rte_flow_item *item,
			      uint64_t pattern_flags)
{
	static const struct rte_flow_item_geneve empty_geneve = {0,};
	const struct rte_flow_item_geneve *geneve_m = item->mask;
	const struct rte_flow_item_geneve *geneve_v = item->spec;
	/* GENEVE flow item validation allows single tunnel item */
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	uint16_t gbhdr_m;
	uint16_t gbhdr_v;
	char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
	char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
	size_t size = sizeof(geneve_m->vni), i;
	uint16_t protocol_m, protocol_v;

	/* Default the UDP destination port only when not already matched. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 MLX5_UDP_PORT_GENEVE);
	}
	if (!geneve_v) {
		/* No spec: match the GENEVE header fields as all-zero. */
		geneve_v = &empty_geneve;
		geneve_m = &empty_geneve;
	} else {
		if (!geneve_m)
			geneve_m = &rte_flow_item_geneve_mask;
	}
	/* VNI: copy mask, AND spec with mask into the value. */
	memcpy(vni_m, geneve_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & geneve_v->vni[i];
	/* OAM flag and option length come from the first header word. */
	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
	protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
	protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
	if (!protocol_m) {
		/* Force next protocol to prevent matchers duplication */
		protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
		if (protocol_v)
			protocol_m = 0xFFFF;
	}
	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
		 protocol_m & protocol_v);
}
9136
9137 /**
9138  * Create Geneve TLV option resource.
9139  *
9140  * @param dev[in, out]
9141  *   Pointer to rte_eth_dev structure.
9142  * @param[in, out] tag_be24
9143  *   Tag value in big endian then R-shift 8.
9144  * @parm[in, out] dev_flow
9145  *   Pointer to the dev_flow.
9146  * @param[out] error
9147  *   pointer to error structure.
9148  *
9149  * @return
9150  *   0 on success otherwise -errno and errno is set.
9151  */
9152
9153 int
9154 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9155                                              const struct rte_flow_item *item,
9156                                              struct rte_flow_error *error)
9157 {
9158         struct mlx5_priv *priv = dev->data->dev_private;
9159         struct mlx5_dev_ctx_shared *sh = priv->sh;
9160         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9161                         sh->geneve_tlv_option_resource;
9162         struct mlx5_devx_obj *obj;
9163         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9164         int ret = 0;
9165
9166         if (!geneve_opt_v)
9167                 return -1;
9168         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9169         if (geneve_opt_resource != NULL) {
9170                 if (geneve_opt_resource->option_class ==
9171                         geneve_opt_v->option_class &&
9172                         geneve_opt_resource->option_type ==
9173                         geneve_opt_v->option_type &&
9174                         geneve_opt_resource->length ==
9175                         geneve_opt_v->option_len) {
9176                         /* We already have GENEVE TLV option obj allocated. */
9177                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9178                                            __ATOMIC_RELAXED);
9179                 } else {
9180                         ret = rte_flow_error_set(error, ENOMEM,
9181                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9182                                 "Only one GENEVE TLV option supported");
9183                         goto exit;
9184                 }
9185         } else {
9186                 /* Create a GENEVE TLV object and resource. */
9187                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
9188                                 geneve_opt_v->option_class,
9189                                 geneve_opt_v->option_type,
9190                                 geneve_opt_v->option_len);
9191                 if (!obj) {
9192                         ret = rte_flow_error_set(error, ENODATA,
9193                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9194                                 "Failed to create GENEVE TLV Devx object");
9195                         goto exit;
9196                 }
9197                 sh->geneve_tlv_option_resource =
9198                                 mlx5_malloc(MLX5_MEM_ZERO,
9199                                                 sizeof(*geneve_opt_resource),
9200                                                 0, SOCKET_ID_ANY);
9201                 if (!sh->geneve_tlv_option_resource) {
9202                         claim_zero(mlx5_devx_cmd_destroy(obj));
9203                         ret = rte_flow_error_set(error, ENOMEM,
9204                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9205                                 "GENEVE TLV object memory allocation failed");
9206                         goto exit;
9207                 }
9208                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9209                 geneve_opt_resource->obj = obj;
9210                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9211                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9212                 geneve_opt_resource->length = geneve_opt_v->option_len;
9213                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9214                                 __ATOMIC_RELAXED);
9215         }
9216 exit:
9217         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9218         return ret;
9219 }
9220
9221 /**
9222  * Add Geneve TLV option item to matcher.
9223  *
9224  * @param[in, out] dev
9225  *   Pointer to rte_eth_dev structure.
9226  * @param[in, out] matcher
9227  *   Flow matcher.
9228  * @param[in, out] key
9229  *   Flow matcher value.
9230  * @param[in] item
9231  *   Flow pattern to translate.
9232  * @param[out] error
9233  *   Pointer to error structure.
9234  */
9235 static int
9236 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9237                                   void *key, const struct rte_flow_item *item,
9238                                   struct rte_flow_error *error)
9239 {
9240         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9241         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9242         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9243         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9244         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9245                         misc_parameters_3);
9246         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9247         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9248         int ret = 0;
9249
9250         if (!geneve_opt_v)
9251                 return -1;
9252         if (!geneve_opt_m)
9253                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9254         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9255                                                            error);
9256         if (ret) {
9257                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9258                 return ret;
9259         }
9260         /*
9261          * Set the option length in GENEVE header if not requested.
9262          * The GENEVE TLV option length is expressed by the option length field
9263          * in the GENEVE header.
9264          * If the option length was not requested but the GENEVE TLV option item
9265          * is present we set the option length field implicitly.
9266          */
9267         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9268                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9269                          MLX5_GENEVE_OPTLEN_MASK);
9270                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9271                          geneve_opt_v->option_len + 1);
9272         }
9273         MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
9274         MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
9275         /* Set the data. */
9276         if (geneve_opt_v->data) {
9277                 memcpy(&opt_data_key, geneve_opt_v->data,
9278                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9279                                 sizeof(opt_data_key)));
9280                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9281                                 sizeof(opt_data_key));
9282                 memcpy(&opt_data_mask, geneve_opt_m->data,
9283                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9284                                 sizeof(opt_data_mask)));
9285                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9286                                 sizeof(opt_data_mask));
9287                 MLX5_SET(fte_match_set_misc3, misc3_m,
9288                                 geneve_tlv_option_0_data,
9289                                 rte_be_to_cpu_32(opt_data_mask));
9290                 MLX5_SET(fte_match_set_misc3, misc3_v,
9291                                 geneve_tlv_option_0_data,
9292                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9293         }
9294         return ret;
9295 }
9296
9297 /**
9298  * Add MPLS item to matcher and to the value.
9299  *
9300  * @param[in, out] matcher
9301  *   Flow matcher.
9302  * @param[in, out] key
9303  *   Flow matcher value.
9304  * @param[in] item
9305  *   Flow pattern to translate.
9306  * @param[in] prev_layer
9307  *   The protocol layer indicated in previous item.
9308  * @param[in] inner
9309  *   Item is inner pattern.
9310  */
9311 static void
9312 flow_dv_translate_item_mpls(void *matcher, void *key,
9313                             const struct rte_flow_item *item,
9314                             uint64_t prev_layer,
9315                             int inner)
9316 {
9317         const uint32_t *in_mpls_m = item->mask;
9318         const uint32_t *in_mpls_v = item->spec;
9319         uint32_t *out_mpls_m = 0;
9320         uint32_t *out_mpls_v = 0;
9321         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9322         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9323         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9324                                      misc_parameters_2);
9325         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9326         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9327         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9328
9329         switch (prev_layer) {
9330         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9331                 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9332                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
9333                                  0xffff);
9334                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9335                                  MLX5_UDP_PORT_MPLS);
9336                 }
9337                 break;
9338         case MLX5_FLOW_LAYER_GRE:
9339                 /* Fall-through. */
9340         case MLX5_FLOW_LAYER_GRE_KEY:
9341                 if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
9342                         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
9343                                  0xffff);
9344                         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9345                                  RTE_ETHER_TYPE_MPLS);
9346                 }
9347                 break;
9348         default:
9349                 break;
9350         }
9351         if (!in_mpls_v)
9352                 return;
9353         if (!in_mpls_m)
9354                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9355         switch (prev_layer) {
9356         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9357                 out_mpls_m =
9358                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9359                                                  outer_first_mpls_over_udp);
9360                 out_mpls_v =
9361                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9362                                                  outer_first_mpls_over_udp);
9363                 break;
9364         case MLX5_FLOW_LAYER_GRE:
9365                 out_mpls_m =
9366                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9367                                                  outer_first_mpls_over_gre);
9368                 out_mpls_v =
9369                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9370                                                  outer_first_mpls_over_gre);
9371                 break;
9372         default:
9373                 /* Inner MPLS not over GRE is not supported. */
9374                 if (!inner) {
9375                         out_mpls_m =
9376                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9377                                                          misc2_m,
9378                                                          outer_first_mpls);
9379                         out_mpls_v =
9380                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9381                                                          misc2_v,
9382                                                          outer_first_mpls);
9383                 }
9384                 break;
9385         }
9386         if (out_mpls_m && out_mpls_v) {
9387                 *out_mpls_m = *in_mpls_m;
9388                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9389         }
9390 }
9391
9392 /**
9393  * Add metadata register item to matcher
9394  *
9395  * @param[in, out] matcher
9396  *   Flow matcher.
9397  * @param[in, out] key
9398  *   Flow matcher value.
9399  * @param[in] reg_type
9400  *   Type of device metadata register
9401  * @param[in] value
9402  *   Register value
9403  * @param[in] mask
9404  *   Register mask
9405  */
9406 static void
9407 flow_dv_match_meta_reg(void *matcher, void *key,
9408                        enum modify_reg reg_type,
9409                        uint32_t data, uint32_t mask)
9410 {
9411         void *misc2_m =
9412                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9413         void *misc2_v =
9414                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9415         uint32_t temp;
9416
9417         data &= mask;
9418         switch (reg_type) {
9419         case REG_A:
9420                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9421                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9422                 break;
9423         case REG_B:
9424                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9425                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9426                 break;
9427         case REG_C_0:
9428                 /*
9429                  * The metadata register C0 field might be divided into
9430                  * source vport index and META item value, we should set
9431                  * this field according to specified mask, not as whole one.
9432                  */
9433                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9434                 temp |= mask;
9435                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9436                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9437                 temp &= ~mask;
9438                 temp |= data;
9439                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9440                 break;
9441         case REG_C_1:
9442                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9443                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9444                 break;
9445         case REG_C_2:
9446                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9447                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9448                 break;
9449         case REG_C_3:
9450                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9451                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9452                 break;
9453         case REG_C_4:
9454                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9455                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9456                 break;
9457         case REG_C_5:
9458                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9459                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9460                 break;
9461         case REG_C_6:
9462                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9463                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9464                 break;
9465         case REG_C_7:
9466                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9467                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9468                 break;
9469         default:
9470                 MLX5_ASSERT(false);
9471                 break;
9472         }
9473 }
9474
9475 /**
9476  * Add MARK item to matcher
9477  *
9478  * @param[in] dev
9479  *   The device to configure through.
9480  * @param[in, out] matcher
9481  *   Flow matcher.
9482  * @param[in, out] key
9483  *   Flow matcher value.
9484  * @param[in] item
9485  *   Flow pattern to translate.
9486  */
9487 static void
9488 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9489                             void *matcher, void *key,
9490                             const struct rte_flow_item *item)
9491 {
9492         struct mlx5_priv *priv = dev->data->dev_private;
9493         const struct rte_flow_item_mark *mark;
9494         uint32_t value;
9495         uint32_t mask;
9496
9497         mark = item->mask ? (const void *)item->mask :
9498                             &rte_flow_item_mark_mask;
9499         mask = mark->id & priv->sh->dv_mark_mask;
9500         mark = (const void *)item->spec;
9501         MLX5_ASSERT(mark);
9502         value = mark->id & priv->sh->dv_mark_mask & mask;
9503         if (mask) {
9504                 enum modify_reg reg;
9505
9506                 /* Get the metadata register index for the mark. */
9507                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9508                 MLX5_ASSERT(reg > 0);
9509                 if (reg == REG_C_0) {
9510                         struct mlx5_priv *priv = dev->data->dev_private;
9511                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9512                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9513
9514                         mask &= msk_c0;
9515                         mask <<= shl_c0;
9516                         value <<= shl_c0;
9517                 }
9518                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9519         }
9520 }
9521
9522 /**
9523  * Add META item to matcher
9524  *
9525  * @param[in] dev
9526  *   The devich to configure through.
9527  * @param[in, out] matcher
9528  *   Flow matcher.
9529  * @param[in, out] key
9530  *   Flow matcher value.
9531  * @param[in] attr
9532  *   Attributes of flow that includes this item.
9533  * @param[in] item
9534  *   Flow pattern to translate.
9535  */
9536 static void
9537 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9538                             void *matcher, void *key,
9539                             const struct rte_flow_attr *attr,
9540                             const struct rte_flow_item *item)
9541 {
9542         const struct rte_flow_item_meta *meta_m;
9543         const struct rte_flow_item_meta *meta_v;
9544
9545         meta_m = (const void *)item->mask;
9546         if (!meta_m)
9547                 meta_m = &rte_flow_item_meta_mask;
9548         meta_v = (const void *)item->spec;
9549         if (meta_v) {
9550                 int reg;
9551                 uint32_t value = meta_v->data;
9552                 uint32_t mask = meta_m->data;
9553
9554                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9555                 if (reg < 0)
9556                         return;
9557                 MLX5_ASSERT(reg != REG_NON);
9558                 if (reg == REG_C_0) {
9559                         struct mlx5_priv *priv = dev->data->dev_private;
9560                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9561                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9562
9563                         mask &= msk_c0;
9564                         mask <<= shl_c0;
9565                         value <<= shl_c0;
9566                 }
9567                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9568         }
9569 }
9570
9571 /**
9572  * Add vport metadata Reg C0 item to matcher
9573  *
9574  * @param[in, out] matcher
9575  *   Flow matcher.
9576  * @param[in, out] key
9577  *   Flow matcher value.
9578  * @param[in] reg
9579  *   Flow pattern to translate.
9580  */
9581 static void
9582 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9583                                   uint32_t value, uint32_t mask)
9584 {
9585         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9586 }
9587
9588 /**
9589  * Add tag item to matcher
9590  *
9591  * @param[in] dev
9592  *   The devich to configure through.
9593  * @param[in, out] matcher
9594  *   Flow matcher.
9595  * @param[in, out] key
9596  *   Flow matcher value.
9597  * @param[in] item
9598  *   Flow pattern to translate.
9599  */
9600 static void
9601 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9602                                 void *matcher, void *key,
9603                                 const struct rte_flow_item *item)
9604 {
9605         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9606         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9607         uint32_t mask, value;
9608
9609         MLX5_ASSERT(tag_v);
9610         value = tag_v->data;
9611         mask = tag_m ? tag_m->data : UINT32_MAX;
9612         if (tag_v->id == REG_C_0) {
9613                 struct mlx5_priv *priv = dev->data->dev_private;
9614                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9615                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9616
9617                 mask &= msk_c0;
9618                 mask <<= shl_c0;
9619                 value <<= shl_c0;
9620         }
9621         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9622 }
9623
9624 /**
9625  * Add TAG item to matcher
9626  *
9627  * @param[in] dev
9628  *   The devich to configure through.
9629  * @param[in, out] matcher
9630  *   Flow matcher.
9631  * @param[in, out] key
9632  *   Flow matcher value.
9633  * @param[in] item
9634  *   Flow pattern to translate.
9635  */
9636 static void
9637 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9638                            void *matcher, void *key,
9639                            const struct rte_flow_item *item)
9640 {
9641         const struct rte_flow_item_tag *tag_v = item->spec;
9642         const struct rte_flow_item_tag *tag_m = item->mask;
9643         enum modify_reg reg;
9644
9645         MLX5_ASSERT(tag_v);
9646         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9647         /* Get the metadata register index for the tag. */
9648         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9649         MLX5_ASSERT(reg > 0);
9650         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9651 }
9652
9653 /**
9654  * Add source vport match to the specified matcher.
9655  *
9656  * @param[in, out] matcher
9657  *   Flow matcher.
9658  * @param[in, out] key
9659  *   Flow matcher value.
9660  * @param[in] port
9661  *   Source vport value to match
9662  * @param[in] mask
9663  *   Mask
9664  */
9665 static void
9666 flow_dv_translate_item_source_vport(void *matcher, void *key,
9667                                     int16_t port, uint16_t mask)
9668 {
9669         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9670         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9671
9672         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9673         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9674 }
9675
9676 /**
9677  * Translate port-id item to eswitch match on  port-id.
9678  *
9679  * @param[in] dev
9680  *   The devich to configure through.
9681  * @param[in, out] matcher
9682  *   Flow matcher.
9683  * @param[in, out] key
9684  *   Flow matcher value.
9685  * @param[in] item
9686  *   Flow pattern to translate.
9687  * @param[in]
9688  *   Flow attributes.
9689  *
9690  * @return
9691  *   0 on success, a negative errno value otherwise.
9692  */
9693 static int
9694 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9695                                void *key, const struct rte_flow_item *item,
9696                                const struct rte_flow_attr *attr)
9697 {
9698         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9699         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9700         struct mlx5_priv *priv;
9701         uint16_t mask, id;
9702
9703         if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
9704                 flow_dv_translate_item_source_vport(matcher, key,
9705                         flow_dv_get_esw_manager_vport_id(dev), 0xffff);
9706                 return 0;
9707         }
9708         mask = pid_m ? pid_m->id : 0xffff;
9709         id = pid_v ? pid_v->id : dev->data->port_id;
9710         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9711         if (!priv)
9712                 return -rte_errno;
9713         /*
9714          * Translate to vport field or to metadata, depending on mode.
9715          * Kernel can use either misc.source_port or half of C0 metadata
9716          * register.
9717          */
9718         if (priv->vport_meta_mask) {
9719                 /*
9720                  * Provide the hint for SW steering library
9721                  * to insert the flow into ingress domain and
9722                  * save the extra vport match.
9723                  */
9724                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9725                     priv->pf_bond < 0 && attr->transfer)
9726                         flow_dv_translate_item_source_vport
9727                                 (matcher, key, priv->vport_id, mask);
9728                 /*
9729                  * We should always set the vport metadata register,
9730                  * otherwise the SW steering library can drop
9731                  * the rule if wire vport metadata value is not zero,
9732                  * it depends on kernel configuration.
9733                  */
9734                 flow_dv_translate_item_meta_vport(matcher, key,
9735                                                   priv->vport_meta_tag,
9736                                                   priv->vport_meta_mask);
9737         } else {
9738                 flow_dv_translate_item_source_vport(matcher, key,
9739                                                     priv->vport_id, mask);
9740         }
9741         return 0;
9742 }
9743
9744 /**
9745  * Add ICMP6 item to matcher and to the value.
9746  *
9747  * @param[in, out] matcher
9748  *   Flow matcher.
9749  * @param[in, out] key
9750  *   Flow matcher value.
9751  * @param[in] item
9752  *   Flow pattern to translate.
9753  * @param[in] inner
9754  *   Item is inner pattern.
9755  */
9756 static void
9757 flow_dv_translate_item_icmp6(void *matcher, void *key,
9758                               const struct rte_flow_item *item,
9759                               int inner)
9760 {
9761         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9762         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9763         void *headers_m;
9764         void *headers_v;
9765         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9766                                      misc_parameters_3);
9767         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9768         if (inner) {
9769                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9770                                          inner_headers);
9771                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9772         } else {
9773                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9774                                          outer_headers);
9775                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9776         }
9777         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9778         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9779         if (!icmp6_v)
9780                 return;
9781         if (!icmp6_m)
9782                 icmp6_m = &rte_flow_item_icmp6_mask;
9783         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9784         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9785                  icmp6_v->type & icmp6_m->type);
9786         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9787         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9788                  icmp6_v->code & icmp6_m->code);
9789 }
9790
9791 /**
9792  * Add ICMP item to matcher and to the value.
9793  *
9794  * @param[in, out] matcher
9795  *   Flow matcher.
9796  * @param[in, out] key
9797  *   Flow matcher value.
9798  * @param[in] item
9799  *   Flow pattern to translate.
9800  * @param[in] inner
9801  *   Item is inner pattern.
9802  */
9803 static void
9804 flow_dv_translate_item_icmp(void *matcher, void *key,
9805                             const struct rte_flow_item *item,
9806                             int inner)
9807 {
9808         const struct rte_flow_item_icmp *icmp_m = item->mask;
9809         const struct rte_flow_item_icmp *icmp_v = item->spec;
9810         uint32_t icmp_header_data_m = 0;
9811         uint32_t icmp_header_data_v = 0;
9812         void *headers_m;
9813         void *headers_v;
9814         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9815                                      misc_parameters_3);
9816         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9817         if (inner) {
9818                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9819                                          inner_headers);
9820                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9821         } else {
9822                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9823                                          outer_headers);
9824                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9825         }
9826         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9827         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9828         if (!icmp_v)
9829                 return;
9830         if (!icmp_m)
9831                 icmp_m = &rte_flow_item_icmp_mask;
9832         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9833                  icmp_m->hdr.icmp_type);
9834         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9835                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9836         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9837                  icmp_m->hdr.icmp_code);
9838         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9839                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9840         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9841         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9842         if (icmp_header_data_m) {
9843                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9844                 icmp_header_data_v |=
9845                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9846                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9847                          icmp_header_data_m);
9848                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9849                          icmp_header_data_v & icmp_header_data_m);
9850         }
9851 }
9852
9853 /**
9854  * Add GTP item to matcher and to the value.
9855  *
9856  * @param[in, out] matcher
9857  *   Flow matcher.
9858  * @param[in, out] key
9859  *   Flow matcher value.
9860  * @param[in] item
9861  *   Flow pattern to translate.
9862  * @param[in] inner
9863  *   Item is inner pattern.
9864  */
9865 static void
9866 flow_dv_translate_item_gtp(void *matcher, void *key,
9867                            const struct rte_flow_item *item, int inner)
9868 {
9869         const struct rte_flow_item_gtp *gtp_m = item->mask;
9870         const struct rte_flow_item_gtp *gtp_v = item->spec;
9871         void *headers_m;
9872         void *headers_v;
9873         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9874                                      misc_parameters_3);
9875         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9876         uint16_t dport = RTE_GTPU_UDP_PORT;
9877
9878         if (inner) {
9879                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9880                                          inner_headers);
9881                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9882         } else {
9883                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9884                                          outer_headers);
9885                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9886         }
9887         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9888                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9889                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9890         }
9891         if (!gtp_v)
9892                 return;
9893         if (!gtp_m)
9894                 gtp_m = &rte_flow_item_gtp_mask;
9895         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9896                  gtp_m->v_pt_rsv_flags);
9897         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9898                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9899         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9900         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9901                  gtp_v->msg_type & gtp_m->msg_type);
9902         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9903                  rte_be_to_cpu_32(gtp_m->teid));
9904         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9905                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9906 }
9907
9908 /**
9909  * Add GTP PSC item to matcher.
9910  *
9911  * @param[in, out] matcher
9912  *   Flow matcher.
9913  * @param[in, out] key
9914  *   Flow matcher value.
9915  * @param[in] item
9916  *   Flow pattern to translate.
9917  */
9918 static int
9919 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9920                                const struct rte_flow_item *item)
9921 {
9922         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9923         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9924         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9925                         misc_parameters_3);
9926         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9927         union {
9928                 uint32_t w32;
9929                 struct {
9930                         uint16_t seq_num;
9931                         uint8_t npdu_num;
9932                         uint8_t next_ext_header_type;
9933                 };
9934         } dw_2;
9935         uint8_t gtp_flags;
9936
9937         /* Always set E-flag match on one, regardless of GTP item settings. */
9938         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9939         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9940         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9941         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9942         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9943         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9944         /*Set next extension header type. */
9945         dw_2.seq_num = 0;
9946         dw_2.npdu_num = 0;
9947         dw_2.next_ext_header_type = 0xff;
9948         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9949                  rte_cpu_to_be_32(dw_2.w32));
9950         dw_2.seq_num = 0;
9951         dw_2.npdu_num = 0;
9952         dw_2.next_ext_header_type = 0x85;
9953         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9954                  rte_cpu_to_be_32(dw_2.w32));
9955         if (gtp_psc_v) {
9956                 union {
9957                         uint32_t w32;
9958                         struct {
9959                                 uint8_t len;
9960                                 uint8_t type_flags;
9961                                 uint8_t qfi;
9962                                 uint8_t reserved;
9963                         };
9964                 } dw_0;
9965
9966                 /*Set extension header PDU type and Qos. */
9967                 if (!gtp_psc_m)
9968                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9969                 dw_0.w32 = 0;
9970                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
9971                 dw_0.qfi = gtp_psc_m->hdr.qfi;
9972                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9973                          rte_cpu_to_be_32(dw_0.w32));
9974                 dw_0.w32 = 0;
9975                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
9976                                                         gtp_psc_m->hdr.type);
9977                 dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
9978                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9979                          rte_cpu_to_be_32(dw_0.w32));
9980         }
9981         return 0;
9982 }
9983
9984 /**
9985  * Add eCPRI item to matcher and to the value.
9986  *
9987  * @param[in] dev
9988  *   The devich to configure through.
9989  * @param[in, out] matcher
9990  *   Flow matcher.
9991  * @param[in, out] key
9992  *   Flow matcher value.
9993  * @param[in] item
9994  *   Flow pattern to translate.
9995  * @param[in] last_item
9996  *   Last item flags.
9997  */
9998 static void
9999 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
10000                              void *key, const struct rte_flow_item *item,
10001                              uint64_t last_item)
10002 {
10003         struct mlx5_priv *priv = dev->data->dev_private;
10004         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
10005         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
10006         struct rte_ecpri_common_hdr common;
10007         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
10008                                      misc_parameters_4);
10009         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
10010         uint32_t *samples;
10011         void *dw_m;
10012         void *dw_v;
10013
10014         /*
10015          * In case of eCPRI over Ethernet, if EtherType is not specified,
10016          * match on eCPRI EtherType implicitly.
10017          */
10018         if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
10019                 void *hdrs_m, *hdrs_v, *l2m, *l2v;
10020
10021                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
10022                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
10023                 l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
10024                 l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
10025                 if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
10026                         *(uint16_t *)l2m = UINT16_MAX;
10027                         *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
10028                 }
10029         }
10030         if (!ecpri_v)
10031                 return;
10032         if (!ecpri_m)
10033                 ecpri_m = &rte_flow_item_ecpri_mask;
10034         /*
10035          * Maximal four DW samples are supported in a single matching now.
10036          * Two are used now for a eCPRI matching:
10037          * 1. Type: one byte, mask should be 0x00ff0000 in network order
10038          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
10039          *    if any.
10040          */
10041         if (!ecpri_m->hdr.common.u32)
10042                 return;
10043         samples = priv->sh->ecpri_parser.ids;
10044         /* Need to take the whole DW as the mask to fill the entry. */
10045         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10046                             prog_sample_field_value_0);
10047         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10048                             prog_sample_field_value_0);
10049         /* Already big endian (network order) in the header. */
10050         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
10051         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
10052         /* Sample#0, used for matching type, offset 0. */
10053         MLX5_SET(fte_match_set_misc4, misc4_m,
10054                  prog_sample_field_id_0, samples[0]);
10055         /* It makes no sense to set the sample ID in the mask field. */
10056         MLX5_SET(fte_match_set_misc4, misc4_v,
10057                  prog_sample_field_id_0, samples[0]);
10058         /*
10059          * Checking if message body part needs to be matched.
10060          * Some wildcard rules only matching type field should be supported.
10061          */
10062         if (ecpri_m->hdr.dummy[0]) {
10063                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
10064                 switch (common.type) {
10065                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10066                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10067                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10068                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10069                                             prog_sample_field_value_1);
10070                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10071                                             prog_sample_field_value_1);
10072                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10073                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10074                                             ecpri_m->hdr.dummy[0];
10075                         /* Sample#1, to match message body, offset 4. */
10076                         MLX5_SET(fte_match_set_misc4, misc4_m,
10077                                  prog_sample_field_id_1, samples[1]);
10078                         MLX5_SET(fte_match_set_misc4, misc4_v,
10079                                  prog_sample_field_id_1, samples[1]);
10080                         break;
10081                 default:
10082                         /* Others, do not match any sample ID. */
10083                         break;
10084                 }
10085         }
10086 }
10087
10088 /*
10089  * Add connection tracking status item to matcher
10090  *
10091  * @param[in] dev
10092  *   The devich to configure through.
10093  * @param[in, out] matcher
10094  *   Flow matcher.
10095  * @param[in, out] key
10096  *   Flow matcher value.
10097  * @param[in] item
10098  *   Flow pattern to translate.
10099  */
10100 static void
10101 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10102                               void *matcher, void *key,
10103                               const struct rte_flow_item *item)
10104 {
10105         uint32_t reg_value = 0;
10106         int reg_id;
10107         /* 8LSB 0b 11/0000/11, middle 4 bits are reserved. */
10108         uint32_t reg_mask = 0;
10109         const struct rte_flow_item_conntrack *spec = item->spec;
10110         const struct rte_flow_item_conntrack *mask = item->mask;
10111         uint32_t flags;
10112         struct rte_flow_error error;
10113
10114         if (!mask)
10115                 mask = &rte_flow_item_conntrack_mask;
10116         if (!spec || !mask->flags)
10117                 return;
10118         flags = spec->flags & mask->flags;
10119         /* The conflict should be checked in the validation. */
10120         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10121                 reg_value |= MLX5_CT_SYNDROME_VALID;
10122         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10123                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10124         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10125                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10126         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10127                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10128         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10129                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10130         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10131                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10132                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10133                 reg_mask |= 0xc0;
10134         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10135                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10136         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10137                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10138         /* The REG_C_x value could be saved during startup. */
10139         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10140         if (reg_id == REG_NON)
10141                 return;
10142         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10143                                reg_value, reg_mask);
10144 }
10145
10146 static void
10147 flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
10148                             const struct rte_flow_item *item,
10149                             struct mlx5_flow *dev_flow, bool is_inner)
10150 {
10151         const struct rte_flow_item_flex *spec =
10152                 (const struct rte_flow_item_flex *)item->spec;
10153         int index = mlx5_flex_acquire_index(dev, spec->handle, false);
10154
10155         MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
10156         if (index < 0)
10157                 return;
10158         if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
10159                 /* Don't count both inner and outer flex items in one rule. */
10160                 if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
10161                         MLX5_ASSERT(false);
10162                 dev_flow->handle->flex_item |= RTE_BIT32(index);
10163         }
10164         mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
10165 }
10166
/* All-zero reference buffer used to detect unused match-parameter sets. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Non-zero when the given header set inside @match_criteria is all zeros. */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
10172
10173 /**
10174  * Calculate flow matcher enable bitmap.
10175  *
10176  * @param match_criteria
10177  *   Pointer to flow matcher criteria.
10178  *
10179  * @return
10180  *   Bitmap of enabled fields.
10181  */
10182 static uint8_t
10183 flow_dv_matcher_enable(uint32_t *match_criteria)
10184 {
10185         uint8_t match_criteria_enable;
10186
10187         match_criteria_enable =
10188                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10189                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10190         match_criteria_enable |=
10191                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10192                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10193         match_criteria_enable |=
10194                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10195                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10196         match_criteria_enable |=
10197                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10198                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10199         match_criteria_enable |=
10200                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10201                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10202         match_criteria_enable |=
10203                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10204                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10205         match_criteria_enable |=
10206                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10207                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10208         return match_criteria_enable;
10209 }
10210
10211 static void
10212 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10213 {
10214         /*
10215          * Check flow matching criteria first, subtract misc5/4 length if flow
10216          * doesn't own misc5/4 parameters. In some old rdma-core releases,
10217          * misc5/4 are not supported, and matcher creation failure is expected
10218          * w/o subtraction. If misc5 is provided, misc4 must be counted in since
10219          * misc5 is right after misc4.
10220          */
10221         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10222                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10223                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10224                 if (!(match_criteria & (1 <<
10225                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10226                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10227                 }
10228         }
10229 }
10230
10231 static struct mlx5_list_entry *
10232 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10233                          struct mlx5_list_entry *entry, void *cb_ctx)
10234 {
10235         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10236         struct mlx5_flow_dv_matcher *ref = ctx->data;
10237         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10238                                                             typeof(*tbl), tbl);
10239         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10240                                                             sizeof(*resource),
10241                                                             0, SOCKET_ID_ANY);
10242
10243         if (!resource) {
10244                 rte_flow_error_set(ctx->error, ENOMEM,
10245                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10246                                    "cannot create matcher");
10247                 return NULL;
10248         }
10249         memcpy(resource, entry, sizeof(*resource));
10250         resource->tbl = &tbl->tbl;
10251         return &resource->entry;
10252 }
10253
/* List callback: release a matcher clone allocated by the clone callback. */
static void
flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
			     struct mlx5_list_entry *entry)
{
	mlx5_free(entry);
}
10260
/*
 * Hash-list create callback: allocate a flow table data entry, create the
 * underlying DR/DV table object, its jump action (for non-root tables) and
 * the per-table matcher list. On any failure all previously acquired
 * resources are rolled back and NULL is returned with ctx->error set.
 */
struct mlx5_list_entry *
flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
	struct rte_flow_error *error = ctx->error;
	union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
	struct mlx5_flow_tbl_resource *tbl;
	void *domain;
	uint32_t idx = 0;
	int ret;

	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	tbl_data->idx = idx;
	tbl_data->tunnel = tt_prm->tunnel;
	tbl_data->group_id = tt_prm->group_id;
	tbl_data->external = !!tt_prm->external;
	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
	tbl_data->is_egress = !!key.is_egress;
	tbl_data->is_transfer = !!key.is_fdb;
	tbl_data->dummy = !!key.dummy;
	tbl_data->level = key.level;
	tbl_data->id = key.id;
	tbl = &tbl_data->tbl;
	/* Dummy entries carry no hardware object at all. */
	if (key.dummy)
		return &tbl_data->entry;
	/* Pick the steering domain matching the table direction. */
	if (key.is_fdb)
		domain = sh->fdb_domain;
	else if (key.is_egress)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	/* The root table (level 0) cannot be a jump destination. */
	if (key.level != 0) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(tbl->obj, &tbl_data->jump.action);
		if (ret) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot create flow jump action");
			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
			return NULL;
		}
	}
	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
	      key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
	      key.level, key.id);
	tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
					      flow_dv_matcher_create_cb,
					      flow_dv_matcher_match_cb,
					      flow_dv_matcher_remove_cb,
					      flow_dv_matcher_clone_cb,
					      flow_dv_matcher_clone_free_cb);
	if (!tbl_data->matchers) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create tbl matcher list");
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
		mlx5_flow_os_destroy_flow_tbl(tbl->obj);
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	return &tbl_data->entry;
}
10345
10346 int
10347 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10348                      void *cb_ctx)
10349 {
10350         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10351         struct mlx5_flow_tbl_data_entry *tbl_data =
10352                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10353         union mlx5_flow_tbl_key key = { .v64 =  *(uint64_t *)(ctx->data) };
10354
10355         return tbl_data->level != key.level ||
10356                tbl_data->id != key.id ||
10357                tbl_data->dummy != key.dummy ||
10358                tbl_data->is_transfer != !!key.is_fdb ||
10359                tbl_data->is_egress != !!key.is_egress;
10360 }
10361
10362 struct mlx5_list_entry *
10363 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10364                       void *cb_ctx)
10365 {
10366         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10367         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10368         struct mlx5_flow_tbl_data_entry *tbl_data;
10369         struct rte_flow_error *error = ctx->error;
10370         uint32_t idx = 0;
10371
10372         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10373         if (!tbl_data) {
10374                 rte_flow_error_set(error, ENOMEM,
10375                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10376                                    NULL,
10377                                    "cannot allocate flow table data entry");
10378                 return NULL;
10379         }
10380         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10381         tbl_data->idx = idx;
10382         return &tbl_data->entry;
10383 }
10384
/* Hash-list callback: return a cloned flow table entry to its ipool. */
void
flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		    container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
10394
10395 /**
10396  * Get a flow table.
10397  *
10398  * @param[in, out] dev
10399  *   Pointer to rte_eth_dev structure.
10400  * @param[in] table_level
10401  *   Table level to use.
10402  * @param[in] egress
10403  *   Direction of the table.
10404  * @param[in] transfer
10405  *   E-Switch or NIC flow.
10406  * @param[in] dummy
10407  *   Dummy entry for dv API.
10408  * @param[in] table_id
10409  *   Table id to use.
10410  * @param[out] error
10411  *   pointer to error structure.
10412  *
10413  * @return
10414  *   Returns tables resource based on the index, NULL in case of failed.
10415  */
10416 struct mlx5_flow_tbl_resource *
10417 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10418                          uint32_t table_level, uint8_t egress,
10419                          uint8_t transfer,
10420                          bool external,
10421                          const struct mlx5_flow_tunnel *tunnel,
10422                          uint32_t group_id, uint8_t dummy,
10423                          uint32_t table_id,
10424                          struct rte_flow_error *error)
10425 {
10426         struct mlx5_priv *priv = dev->data->dev_private;
10427         union mlx5_flow_tbl_key table_key = {
10428                 {
10429                         .level = table_level,
10430                         .id = table_id,
10431                         .reserved = 0,
10432                         .dummy = !!dummy,
10433                         .is_fdb = !!transfer,
10434                         .is_egress = !!egress,
10435                 }
10436         };
10437         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10438                 .tunnel = tunnel,
10439                 .group_id = group_id,
10440                 .external = external,
10441         };
10442         struct mlx5_flow_cb_ctx ctx = {
10443                 .dev = dev,
10444                 .error = error,
10445                 .data = &table_key.v64,
10446                 .data2 = &tt_prm,
10447         };
10448         struct mlx5_list_entry *entry;
10449         struct mlx5_flow_tbl_data_entry *tbl_data;
10450
10451         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10452         if (!entry) {
10453                 rte_flow_error_set(error, ENOMEM,
10454                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10455                                    "cannot get table");
10456                 return NULL;
10457         }
10458         DRV_LOG(DEBUG, "table_level %u table_id %u "
10459                 "tunnel %u group %u registered.",
10460                 table_level, table_id,
10461                 tunnel ? tunnel->tunnel_id : 0, group_id);
10462         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10463         return &tbl_data->tbl;
10464 }
10465
/*
 * Hash-list remove callback: destroy the jump action and table object,
 * drop the tunnel group reference for externally created tunnel-offload
 * tables, then release the matcher list and the entry itself.
 */
void
flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		    container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	MLX5_ASSERT(entry && sh);
	if (tbl_data->jump.action)
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
	if (tbl_data->tbl.obj)
		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
	if (tbl_data->tunnel_offload && tbl_data->external) {
		struct mlx5_list_entry *he;
		struct mlx5_hlist *tunnel_grp_hash;
		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
		union tunnel_tbl_key tunnel_key = {
			.tunnel_id = tbl_data->tunnel ?
					tbl_data->tunnel->tunnel_id : 0,
			.group = tbl_data->group_id
		};
		uint32_t table_level = tbl_data->level;
		struct mlx5_flow_cb_ctx ctx = {
			.data = (void *)&tunnel_key.val,
		};

		/* Tunnel-less tables are tracked in the hub-wide group hash. */
		tunnel_grp_hash = tbl_data->tunnel ?
					tbl_data->tunnel->groups :
					thub->groups;
		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
		if (he)
			mlx5_hlist_unregister(tunnel_grp_hash, he);
		DRV_LOG(DEBUG,
			"table_level %u id %u tunnel %u group %u released.",
			table_level,
			tbl_data->id,
			tbl_data->tunnel ?
			tbl_data->tunnel->tunnel_id : 0,
			tbl_data->group_id);
	}
	mlx5_list_destroy(tbl_data->matchers);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
10509
10510 /**
10511  * Release a flow table.
10512  *
10513  * @param[in] sh
10514  *   Pointer to device shared structure.
10515  * @param[in] tbl
10516  *   Table resource to be released.
10517  *
10518  * @return
10519  *   Returns 0 if table was released, else return 1;
10520  */
10521 static int
10522 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10523                              struct mlx5_flow_tbl_resource *tbl)
10524 {
10525         struct mlx5_flow_tbl_data_entry *tbl_data =
10526                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10527
10528         if (!tbl)
10529                 return 0;
10530         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10531 }
10532
10533 int
10534 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10535                          struct mlx5_list_entry *entry, void *cb_ctx)
10536 {
10537         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10538         struct mlx5_flow_dv_matcher *ref = ctx->data;
10539         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10540                                                         entry);
10541
10542         return cur->crc != ref->crc ||
10543                cur->priority != ref->priority ||
10544                memcmp((const void *)cur->mask.buf,
10545                       (const void *)ref->mask.buf, ref->mask.size);
10546 }
10547
/*
 * List create callback: allocate a matcher resource and create the
 * corresponding DV flow matcher object on the reference's table.
 *
 * NOTE(review): __flow_dv_adjust_buf_size() updates ref->mask.size in
 * place, i.e. the caller-provided reference is modified as a side effect —
 * confirm callers rely on (or at least tolerate) this.
 */
struct mlx5_list_entry *
flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_matcher *ref = ctx->data;
	struct mlx5_flow_dv_matcher *resource;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&ref->mask,
	};
	struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
							    typeof(*tbl), tbl);
	int ret;

	resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
			       SOCKET_ID_ANY);
	if (!resource) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	*resource = *ref;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(resource->mask.buf);
	/* Trim misc5/4 from the mask size when those criteria are unused. */
	__flow_dv_adjust_buf_size(&ref->mask.size,
				  dv_attr.match_criteria_enable);
	dv_attr.priority = ref->priority;
	if (tbl->is_egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
					       tbl->tbl.obj,
					       &resource->matcher_object);
	if (ret) {
		mlx5_free(resource);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	return &resource->entry;
}
10591
/**
 * Register the flow matcher.
 *
 * Acquires (or creates) the flow table identified by @p key, then registers
 * the matcher in that table's matcher list, reusing an existing equivalent
 * entry when one is found (see flow_dv_matcher_match_cb()).
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] ref
 *   Pointer to the reference flow matcher to register.
 * @param[in, out] key
 *   Pointer to flow table key.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow; its handle receives the matcher on success.
 * @param[in] tunnel
 *   Tunnel offload context for the table lookup, may be NULL.
 * @param[in] group_id
 *   Flow group ID used for the table lookup.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *ref,
			 union mlx5_flow_tbl_key *key,
			 struct mlx5_flow *dev_flow,
			 const struct mlx5_flow_tunnel *tunnel,
			 uint32_t group_id,
			 struct rte_flow_error *error)
{
	struct mlx5_list_entry *entry;
	struct mlx5_flow_dv_matcher *resource;
	struct mlx5_flow_tbl_resource *tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = ref,
	};
	/**
	 * tunnel offload API requires this registration for cases when
	 * tunnel match rule was inserted before tunnel set rule.
	 */
	tbl = flow_dv_tbl_resource_get(dev, key->level,
				       key->is_egress, key->is_fdb,
				       dev_flow->external, tunnel,
				       group_id, 0, key->id, error);
	if (!tbl)
		return -rte_errno;	/* No need to refill the error info */
	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
	/* The matcher holds the table reference taken above. */
	ref->tbl = tbl;
	entry = mlx5_list_register(tbl_data->matchers, &ctx);
	if (!entry) {
		/* Undo the table reference on registration failure. */
		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate ref memory");
	}
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->handle->dvh.matcher = resource;
	return 0;
}
10649
10650 struct mlx5_list_entry *
10651 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10652 {
10653         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10654         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10655         struct mlx5_flow_dv_tag_resource *entry;
10656         uint32_t idx = 0;
10657         int ret;
10658
10659         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10660         if (!entry) {
10661                 rte_flow_error_set(ctx->error, ENOMEM,
10662                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10663                                    "cannot allocate resource memory");
10664                 return NULL;
10665         }
10666         entry->idx = idx;
10667         entry->tag_id = *(uint32_t *)(ctx->data);
10668         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10669                                                   &entry->action);
10670         if (ret) {
10671                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10672                 rte_flow_error_set(ctx->error, ENOMEM,
10673                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10674                                    NULL, "cannot create action");
10675                 return NULL;
10676         }
10677         return &entry->entry;
10678 }
10679
10680 int
10681 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10682                      void *cb_ctx)
10683 {
10684         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10685         struct mlx5_flow_dv_tag_resource *tag =
10686                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10687
10688         return *(uint32_t *)(ctx->data) != tag->tag_id;
10689 }
10690
10691 struct mlx5_list_entry *
10692 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10693                      void *cb_ctx)
10694 {
10695         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10696         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10697         struct mlx5_flow_dv_tag_resource *entry;
10698         uint32_t idx = 0;
10699
10700         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10701         if (!entry) {
10702                 rte_flow_error_set(ctx->error, ENOMEM,
10703                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10704                                    "cannot allocate tag resource memory");
10705                 return NULL;
10706         }
10707         memcpy(entry, oentry, sizeof(*entry));
10708         entry->idx = idx;
10709         return &entry->entry;
10710 }
10711
10712 void
10713 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10714 {
10715         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10716         struct mlx5_flow_dv_tag_resource *tag =
10717                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10718
10719         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10720 }
10721
/**
 * Find existing tag resource or create and register a new one.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] tag_be24
 *   Tag value in big endian then R-shift 8.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow; receives the tag resource on success.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_tag_resource_register
			(struct rte_eth_dev *dev,
			 uint32_t tag_be24,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_tag_resource *resource;
	struct mlx5_list_entry *entry;
	struct mlx5_flow_cb_ctx ctx = {
					.error = error,
					.data = &tag_be24,
					};
	struct mlx5_hlist *tag_table;

	/* Lazily create the per-device tag hash list on first use. */
	tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
				      "tags",
				      MLX5_TAGS_HLIST_ARRAY_SIZE,
				      false, false, priv->sh,
				      flow_dv_tag_create_cb,
				      flow_dv_tag_match_cb,
				      flow_dv_tag_remove_cb,
				      flow_dv_tag_clone_cb,
				      flow_dv_tag_clone_free_cb);
	if (unlikely(!tag_table))
		return -rte_errno;
	/* Reuse an entry with the same tag value or create a new one. */
	entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
	if (entry) {
		resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
					entry);
		dev_flow->handle->dvh.rix_tag = resource->idx;
		dev_flow->dv.tag_resource = resource;
		return 0;
	}
	/* rte_errno was set by the failed registration path. */
	return -rte_errno;
}
10774
10775 void
10776 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10777 {
10778         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10779         struct mlx5_flow_dv_tag_resource *tag =
10780                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10781
10782         MLX5_ASSERT(tag && sh && tag->action);
10783         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10784         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10785         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10786 }
10787
10788 /**
10789  * Release the tag.
10790  *
10791  * @param dev
10792  *   Pointer to Ethernet device.
10793  * @param tag_idx
10794  *   Tag index.
10795  *
10796  * @return
10797  *   1 while a reference on it exists, 0 when freed.
10798  */
10799 static int
10800 flow_dv_tag_release(struct rte_eth_dev *dev,
10801                     uint32_t tag_idx)
10802 {
10803         struct mlx5_priv *priv = dev->data->dev_private;
10804         struct mlx5_flow_dv_tag_resource *tag;
10805
10806         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10807         if (!tag)
10808                 return 0;
10809         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10810                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10811         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10812 }
10813
10814 /**
10815  * Translate action PORT_ID / REPRESENTED_PORT to vport.
10816  *
10817  * @param[in] dev
10818  *   Pointer to rte_eth_dev structure.
10819  * @param[in] action
10820  *   Pointer to action PORT_ID / REPRESENTED_PORT.
10821  * @param[out] dst_port_id
10822  *   The target port ID.
10823  * @param[out] error
10824  *   Pointer to the error structure.
10825  *
10826  * @return
10827  *   0 on success, a negative errno value otherwise and rte_errno is set.
10828  */
10829 static int
10830 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10831                                  const struct rte_flow_action *action,
10832                                  uint32_t *dst_port_id,
10833                                  struct rte_flow_error *error)
10834 {
10835         uint32_t port;
10836         struct mlx5_priv *priv;
10837
10838         switch (action->type) {
10839         case RTE_FLOW_ACTION_TYPE_PORT_ID: {
10840                 const struct rte_flow_action_port_id *conf;
10841
10842                 conf = (const struct rte_flow_action_port_id *)action->conf;
10843                 port = conf->original ? dev->data->port_id : conf->id;
10844                 break;
10845         }
10846         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
10847                 const struct rte_flow_action_ethdev *ethdev;
10848
10849                 ethdev = (const struct rte_flow_action_ethdev *)action->conf;
10850                 port = ethdev->port_id;
10851                 break;
10852         }
10853         default:
10854                 MLX5_ASSERT(false);
10855                 return rte_flow_error_set(error, EINVAL,
10856                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
10857                                           "unknown E-Switch action");
10858         }
10859
10860         priv = mlx5_port_to_eswitch_info(port, false);
10861         if (!priv)
10862                 return rte_flow_error_set(error, -rte_errno,
10863                                           RTE_FLOW_ERROR_TYPE_ACTION,
10864                                           NULL,
10865                                           "No eswitch info was found for port");
10866 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10867         /*
10868          * This parameter is transferred to
10869          * mlx5dv_dr_action_create_dest_ib_port().
10870          */
10871         *dst_port_id = priv->dev_port;
10872 #else
10873         /*
10874          * Legacy mode, no LAG configurations is supported.
10875          * This parameter is transferred to
10876          * mlx5dv_dr_action_create_dest_vport().
10877          */
10878         *dst_port_id = priv->vport_id;
10879 #endif
10880         return 0;
10881 }
10882
/**
 * Create a counter with aging configuration.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] count
 *   Pointer to the counter action configuration.
 * @param[in] age
 *   Pointer to the aging action configuration.
 *
 * @return
 *   Index to flow counter on success, 0 otherwise.
 */
static uint32_t
flow_dv_translate_create_counter(struct rte_eth_dev *dev,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_action_count *count
					__rte_unused,
				const struct rte_flow_action_age *age)
{
	uint32_t counter;
	struct mlx5_age_param *age_param;

	/* Allocate an age-enabled counter only when aging is requested. */
	counter = flow_dv_counter_alloc(dev, !!age);
	if (!counter || age == NULL)
		return counter;
	/* Fill in the aging parameters attached to this counter index. */
	age_param = flow_dv_counter_idx_get_age(dev, counter);
	/* Fall back to the flow index as the aging event context. */
	age_param->context = age->context ? age->context :
		(void *)(uintptr_t)(dev_flow->flow_idx);
	age_param->timeout = age->timeout;
	age_param->port_id = dev->data->port_id;
	/* Reset the hit timestamp and publish the entry as an age candidate. */
	__atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
	return counter;
}
10920
/**
 * Add Tx queue matcher
 *
 * Translates the internal Tx queue item into a match on the send queue
 * number (source_sqn) in the misc parameters of the matcher mask and value.
 *
 * @param[in] dev
 *   Pointer to the dev struct.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tx_queue *queue_m;
	const struct mlx5_rte_flow_item_tx_queue *queue_v;
	void *misc_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	struct mlx5_txq_ctrl *txq;
	uint32_t queue, mask;

	queue_m = (const void *)item->mask;
	queue_v = (const void *)item->spec;
	/* Without a spec there is nothing to match on. */
	if (!queue_v)
		return;
	/* Take a queue reference to read its SQ id; released below. */
	txq = mlx5_txq_get(dev, queue_v->queue);
	if (!txq)
		return;
	/* Hairpin queues expose the SQ object through a different member. */
	if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
		queue = txq->obj->sq->id;
	else
		queue = txq->obj->sq_obj.sq->id;
	/* A missing mask means match the SQ number exactly. */
	mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
	mlx5_txq_release(dev, queue_v->queue);
}
10965
/**
 * Set the hash fields according to the @p flow information.
 *
 * Derives dev_flow->hash_fields from the L3/L4 layers detected in the flow
 * (inner or outer, per the RSS level) intersected with the RSS types
 * requested in @p rss_desc. Leaves hash_fields at 0 when the rule's L3
 * protocol does not match any requested RSS type.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] rss_desc
 *   Pointer to the mlx5_flow_rss_desc.
 */
static void
flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
		       struct mlx5_flow_rss_desc *rss_desc)
{
	uint64_t items = dev_flow->handle->layers;
	int rss_inner = 0;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);

	dev_flow->hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	/* RSS level >= 2 requests hashing on the inner packet headers. */
	if (rss_desc->level >= 2)
		rss_inner = 1;
#endif
	/* L3: select IPv4 or IPv6 fields matching the flow's L3 layer. */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
			else
				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
			if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
			else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
			else
				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
		}
	}
	if (dev_flow->hash_fields == 0)
		/*
		 * There is no match between the RSS types and the
		 * L3 protocol (IPv4/IPv6) defined in the flow rule.
		 */
		return;
	/* L4: add UDP or TCP port fields matching the flow's L4 layer. */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
		if (rss_types & RTE_ETH_RSS_UDP) {
			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_SRC_PORT_UDP;
			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_DST_PORT_UDP;
			else
				dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
		if (rss_types & RTE_ETH_RSS_TCP) {
			if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_SRC_PORT_TCP;
			else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_DST_PORT_TCP;
			else
				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
		}
	}
	/* Mark the hash as computed over the inner headers. */
	if (rss_inner)
		dev_flow->hash_fields |= IBV_RX_HASH_INNER;
}
11042
11043 /**
11044  * Prepare an Rx Hash queue.
11045  *
11046  * @param dev
11047  *   Pointer to Ethernet device.
11048  * @param[in] dev_flow
11049  *   Pointer to the mlx5_flow.
11050  * @param[in] rss_desc
11051  *   Pointer to the mlx5_flow_rss_desc.
11052  * @param[out] hrxq_idx
11053  *   Hash Rx queue index.
11054  *
11055  * @return
11056  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
11057  */
11058 static struct mlx5_hrxq *
11059 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
11060                      struct mlx5_flow *dev_flow,
11061                      struct mlx5_flow_rss_desc *rss_desc,
11062                      uint32_t *hrxq_idx)
11063 {
11064         struct mlx5_priv *priv = dev->data->dev_private;
11065         struct mlx5_flow_handle *dh = dev_flow->handle;
11066         struct mlx5_hrxq *hrxq;
11067
11068         MLX5_ASSERT(rss_desc->queue_num);
11069         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
11070         rss_desc->hash_fields = dev_flow->hash_fields;
11071         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
11072         rss_desc->shared_rss = 0;
11073         if (rss_desc->hash_fields == 0)
11074                 rss_desc->queue_num = 1;
11075         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
11076         if (!*hrxq_idx)
11077                 return NULL;
11078         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
11079                               *hrxq_idx);
11080         return hrxq;
11081 }
11082
11083 /**
11084  * Release sample sub action resource.
11085  *
11086  * @param[in, out] dev
11087  *   Pointer to rte_eth_dev structure.
11088  * @param[in] act_res
11089  *   Pointer to sample sub action resource.
11090  */
11091 static void
11092 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
11093                                    struct mlx5_flow_sub_actions_idx *act_res)
11094 {
11095         if (act_res->rix_hrxq) {
11096                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
11097                 act_res->rix_hrxq = 0;
11098         }
11099         if (act_res->rix_encap_decap) {
11100                 flow_dv_encap_decap_resource_release(dev,
11101                                                      act_res->rix_encap_decap);
11102                 act_res->rix_encap_decap = 0;
11103         }
11104         if (act_res->rix_port_id_action) {
11105                 flow_dv_port_id_action_resource_release(dev,
11106                                                 act_res->rix_port_id_action);
11107                 act_res->rix_port_id_action = 0;
11108         }
11109         if (act_res->rix_tag) {
11110                 flow_dv_tag_release(dev, act_res->rix_tag);
11111                 act_res->rix_tag = 0;
11112         }
11113         if (act_res->rix_jump) {
11114                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
11115                 act_res->rix_jump = 0;
11116         }
11117 }
11118
11119 int
11120 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
11121                         struct mlx5_list_entry *entry, void *cb_ctx)
11122 {
11123         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11124         struct rte_eth_dev *dev = ctx->dev;
11125         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
11126         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
11127                                                               typeof(*resource),
11128                                                               entry);
11129
11130         if (ctx_resource->ratio == resource->ratio &&
11131             ctx_resource->ft_type == resource->ft_type &&
11132             ctx_resource->ft_id == resource->ft_id &&
11133             ctx_resource->set_action == resource->set_action &&
11134             !memcmp((void *)&ctx_resource->sample_act,
11135                     (void *)&resource->sample_act,
11136                     sizeof(struct mlx5_flow_sub_actions_list))) {
11137                 /*
11138                  * Existing sample action should release the prepared
11139                  * sub-actions reference counter.
11140                  */
11141                 flow_dv_sample_sub_actions_release(dev,
11142                                                    &ctx_resource->sample_idx);
11143                 return 0;
11144         }
11145         return 1;
11146 }
11147
/*
 * List create callback for sample resources.
 *
 * Allocates a sample resource from the pool, creates (or reuses) the
 * normal-path flow table one level below the sample table, appends the
 * default miss action for FDB sampling, and creates the DR sampler action.
 *
 * Returns the embedded list entry, or NULL with ctx->error filled; on
 * failure the acquired table, the pool slot and (for non-FDB) the prepared
 * sub-action references are released.
 */
struct mlx5_list_entry *
flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
	void **sample_dv_actions = ctx_resource->sub_actions;
	struct mlx5_flow_dv_sample_resource *resource;
	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	uint32_t idx = 0;
	/* Non-sampled traffic continues in the next table level. */
	const uint32_t next_ft_step = 1;
	uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
	uint8_t is_egress = 0;
	uint8_t is_transfer = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new sample resource. */
	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
	if (!resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*resource = *ctx_resource;
	/* Create normal path table level */
	if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		is_transfer = 1;
	else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		is_egress = 1;
	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
					is_egress, is_transfer,
					true, NULL, 0, 0, 0, error);
	if (!tbl) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "fail to create normal path table "
					  "for sample");
		goto error;
	}
	resource->normal_path_tbl = tbl;
	if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		/* FDB sampling requires the shared default miss action. */
		if (!sh->default_miss_action) {
			rte_flow_error_set(error, ENOMEM,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"default miss action was not "
						"created");
			goto error;
		}
		sample_dv_actions[ctx_resource->sample_act.actions_num++] =
						sh->default_miss_action;
	}
	/* Create a DR sample action */
	sampler_attr.sample_ratio = resource->ratio;
	sampler_attr.default_next_table = tbl->obj;
	sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
							&sample_dv_actions[0];
	sampler_attr.action = resource->set_action;
	if (mlx5_os_flow_dr_create_flow_action_sampler
			(&sampler_attr, &resource->verbs_action)) {
		rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create sample action");
		goto error;
	}
	resource->idx = idx;
	resource->dev = dev;
	return &resource->entry;
error:
	/*
	 * NOTE(review): the FDB case deliberately skips the sub-action
	 * release here — presumably the references are owned elsewhere
	 * for transfer rules; confirm against the callers.
	 */
	if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
		flow_dv_sample_sub_actions_release(dev,
						   &resource->sample_idx);
	if (resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
				resource->normal_path_tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
	return NULL;

}
11234
11235 struct mlx5_list_entry *
11236 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11237                          struct mlx5_list_entry *entry __rte_unused,
11238                          void *cb_ctx)
11239 {
11240         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11241         struct rte_eth_dev *dev = ctx->dev;
11242         struct mlx5_flow_dv_sample_resource *resource;
11243         struct mlx5_priv *priv = dev->data->dev_private;
11244         struct mlx5_dev_ctx_shared *sh = priv->sh;
11245         uint32_t idx = 0;
11246
11247         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11248         if (!resource) {
11249                 rte_flow_error_set(ctx->error, ENOMEM,
11250                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11251                                           NULL,
11252                                           "cannot allocate resource memory");
11253                 return NULL;
11254         }
11255         memcpy(resource, entry, sizeof(*resource));
11256         resource->idx = idx;
11257         resource->dev = dev;
11258         return &resource->entry;
11259 }
11260
11261 void
11262 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11263                              struct mlx5_list_entry *entry)
11264 {
11265         struct mlx5_flow_dv_sample_resource *resource =
11266                                   container_of(entry, typeof(*resource), entry);
11267         struct rte_eth_dev *dev = resource->dev;
11268         struct mlx5_priv *priv = dev->data->dev_private;
11269
11270         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11271 }
11272
11273 /**
11274  * Find existing sample resource or create and register a new one.
11275  *
11276  * @param[in, out] dev
11277  *   Pointer to rte_eth_dev structure.
11278  * @param[in] ref
11279  *   Pointer to sample resource reference.
11280  * @parm[in, out] dev_flow
11281  *   Pointer to the dev_flow.
11282  * @param[out] error
11283  *   pointer to error structure.
11284  *
11285  * @return
11286  *   0 on success otherwise -errno and errno is set.
11287  */
11288 static int
11289 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11290                          struct mlx5_flow_dv_sample_resource *ref,
11291                          struct mlx5_flow *dev_flow,
11292                          struct rte_flow_error *error)
11293 {
11294         struct mlx5_flow_dv_sample_resource *resource;
11295         struct mlx5_list_entry *entry;
11296         struct mlx5_priv *priv = dev->data->dev_private;
11297         struct mlx5_flow_cb_ctx ctx = {
11298                 .dev = dev,
11299                 .error = error,
11300                 .data = ref,
11301         };
11302
11303         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11304         if (!entry)
11305                 return -rte_errno;
11306         resource = container_of(entry, typeof(*resource), entry);
11307         dev_flow->handle->dvh.rix_sample = resource->idx;
11308         dev_flow->dv.sample_res = resource;
11309         return 0;
11310 }
11311
11312 int
11313 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11314                             struct mlx5_list_entry *entry, void *cb_ctx)
11315 {
11316         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11317         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11318         struct rte_eth_dev *dev = ctx->dev;
11319         struct mlx5_flow_dv_dest_array_resource *resource =
11320                                   container_of(entry, typeof(*resource), entry);
11321         uint32_t idx = 0;
11322
11323         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11324             ctx_resource->ft_type == resource->ft_type &&
11325             !memcmp((void *)resource->sample_act,
11326                     (void *)ctx_resource->sample_act,
11327                    (ctx_resource->num_of_dest *
11328                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11329                 /*
11330                  * Existing sample action should release the prepared
11331                  * sub-actions reference counter.
11332                  */
11333                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11334                         flow_dv_sample_sub_actions_release(dev,
11335                                         &ctx_resource->sample_idx[idx]);
11336                 return 0;
11337         }
11338         return 1;
11339 }
11340
/*
 * List create callback for destination array resources: allocates a new
 * resource from the ipool, translates every prepared sub-action list into
 * an mlx5dv destination attribute and creates the DR dest-array action.
 */
struct mlx5_list_entry *
flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_dest_array_resource *resource;
	struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx = 0, res_idx = 0;
	struct rte_flow_error *error = ctx->error;
	uint64_t action_flags;
	int ret;

	/* Register new destination array resource. */
	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
				      &res_idx);
	if (!resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	/* Start from a full copy of the caller's reference resource. */
	*resource = *ctx_resource;
	/* Pick the DR domain matching the reference's flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Build one DR destination attribute per prepared sub-action list. */
	for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
				 mlx5_malloc(MLX5_MEM_ZERO,
				 sizeof(struct mlx5dv_dr_action_dest_attr),
				 0, SOCKET_ID_ANY);
		if (!dest_attr[idx]) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot allocate resource memory");
			goto error;
		}
		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
		sample_act = &ctx_resource->sample_act[idx];
		action_flags = sample_act->action_flags;
		/* The switch matches the exact flag combination, not bits. */
		switch (action_flags) {
		case MLX5_FLOW_ACTION_QUEUE:
			dest_attr[idx]->dest = sample_act->dr_queue_action;
			break;
		case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
			/* Encap then forward to port: reformat destination. */
			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
			dest_attr[idx]->dest_reformat->reformat =
					sample_act->dr_encap_action;
			dest_attr[idx]->dest_reformat->dest =
					sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_PORT_ID:
			dest_attr[idx]->dest = sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_JUMP:
			dest_attr[idx]->dest = sample_act->dr_jump_action;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   NULL,
					   "unsupported actions type");
			goto error;
		}
	}
	/* create a dest array action */
	ret = mlx5_os_flow_dr_create_flow_action_dest_array
						(domain,
						 resource->num_of_dest,
						 dest_attr,
						 &resource->action);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create destination array action");
		goto error;
	}
	resource->idx = res_idx;
	resource->dev = dev;
	/* The temporary DR destination attributes are no longer needed. */
	for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
		mlx5_free(dest_attr[idx]);
	return &resource->entry;
error:
	/* Drop prepared sub-action references and free partial attributes. */
	for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
		flow_dv_sample_sub_actions_release(dev,
						   &resource->sample_idx[idx]);
		if (dest_attr[idx])
			mlx5_free(dest_attr[idx]);
	}
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
	return NULL;
}
11445
11446 struct mlx5_list_entry *
11447 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11448                             struct mlx5_list_entry *entry __rte_unused,
11449                             void *cb_ctx)
11450 {
11451         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11452         struct rte_eth_dev *dev = ctx->dev;
11453         struct mlx5_flow_dv_dest_array_resource *resource;
11454         struct mlx5_priv *priv = dev->data->dev_private;
11455         struct mlx5_dev_ctx_shared *sh = priv->sh;
11456         uint32_t res_idx = 0;
11457         struct rte_flow_error *error = ctx->error;
11458
11459         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11460                                       &res_idx);
11461         if (!resource) {
11462                 rte_flow_error_set(error, ENOMEM,
11463                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11464                                           NULL,
11465                                           "cannot allocate dest-array memory");
11466                 return NULL;
11467         }
11468         memcpy(resource, entry, sizeof(*resource));
11469         resource->idx = res_idx;
11470         resource->dev = dev;
11471         return &resource->entry;
11472 }
11473
11474 void
11475 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11476                                  struct mlx5_list_entry *entry)
11477 {
11478         struct mlx5_flow_dv_dest_array_resource *resource =
11479                         container_of(entry, typeof(*resource), entry);
11480         struct rte_eth_dev *dev = resource->dev;
11481         struct mlx5_priv *priv = dev->data->dev_private;
11482
11483         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11484 }
11485
11486 /**
11487  * Find existing destination array resource or create and register a new one.
11488  *
11489  * @param[in, out] dev
11490  *   Pointer to rte_eth_dev structure.
11491  * @param[in] ref
11492  *   Pointer to destination array resource reference.
11493  * @parm[in, out] dev_flow
11494  *   Pointer to the dev_flow.
11495  * @param[out] error
11496  *   pointer to error structure.
11497  *
11498  * @return
11499  *   0 on success otherwise -errno and errno is set.
11500  */
11501 static int
11502 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11503                          struct mlx5_flow_dv_dest_array_resource *ref,
11504                          struct mlx5_flow *dev_flow,
11505                          struct rte_flow_error *error)
11506 {
11507         struct mlx5_flow_dv_dest_array_resource *resource;
11508         struct mlx5_priv *priv = dev->data->dev_private;
11509         struct mlx5_list_entry *entry;
11510         struct mlx5_flow_cb_ctx ctx = {
11511                 .dev = dev,
11512                 .error = error,
11513                 .data = ref,
11514         };
11515
11516         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11517         if (!entry)
11518                 return -rte_errno;
11519         resource = container_of(entry, typeof(*resource), entry);
11520         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11521         dev_flow->dv.dest_array_res = resource;
11522         return 0;
11523 }
11524
/**
 * Convert Sample action to DV specification.
 *
 * Walks the sample's sub-action list, creating/registering the resources
 * each sub-action needs (hrxq, tag, counter, port-id, encap) and collecting
 * the resulting DR actions into @sample_actions and @res.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to sample action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in, out] num_of_dest
 *   Pointer to the num of destination.
 * @param[in, out] sample_actions
 *   Pointer to sample actions list.
 * @param[in, out] res
 *   Pointer to sample resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate_action_sample(struct rte_eth_dev *dev,
				const struct rte_flow_action_sample *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				uint32_t *num_of_dest,
				void **sample_actions,
				struct mlx5_flow_dv_sample_resource *res,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action *sub_actions;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5_flow_sub_actions_idx *sample_idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5_flow_rss_desc *rss_desc;
	uint64_t action_flags = 0;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	sample_act = &res->sample_act;
	sample_idx = &res->sample_idx;
	res->ratio = action->ratio;
	sub_actions = action->actions;
	for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
		int type = sub_actions->type;
		/* Saved handle state, restored after registering per-sample
		 * resources so the normal path keeps its own references.
		 */
		uint32_t pre_rix = 0;
		void *pre_r;
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		{
			const struct rte_flow_action_queue *queue;
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;

			queue = sub_actions->conf;
			/* A single queue behaves as one-entry RSS. */
			rss_desc->queue_num = 1;
			rss_desc->queue[0] = queue->index;
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS:
		{
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;
			const struct rte_flow_action_rss *rss;
			const uint8_t *rss_key;

			rss = sub_actions->conf;
			memcpy(rss_desc->queue, rss->queue,
			       rss->queue_num * sizeof(uint16_t));
			rss_desc->queue_num = rss->queue_num;
			/* NULL RSS key indicates default RSS key. */
			rss_key = !rss->key ? rss_hash_default_key : rss->key;
			memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
			/*
			 * rss->level and rss.types should be set in advance
			 * when expanding items for RSS.
			 */
			flow_dv_hashfields_set(dev_flow, rss_desc);
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK:
		{
			uint32_t tag_be = mlx5_flow_mark_set
				(((const struct rte_flow_action_mark *)
				(sub_actions->conf))->id);

			wks->mark = 1;
			pre_rix = dev_flow->handle->dvh.rix_tag;
			/* Save the mark resource before sample */
			pre_r = dev_flow->dv.tag_resource;
			if (flow_dv_tag_resource_register(dev, tag_be,
						  dev_flow, error))
				return -rte_errno;
			MLX5_ASSERT(dev_flow->dv.tag_resource);
			sample_act->dr_tag_action =
				dev_flow->dv.tag_resource->action;
			sample_idx->rix_tag =
				dev_flow->handle->dvh.rix_tag;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_tag_action;
			/* Recover the mark resource after sample */
			dev_flow->dv.tag_resource = pre_r;
			dev_flow->handle->dvh.rix_tag = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT:
		{
			/* The counter is shared with the normal path when
			 * the flow already created one.
			 */
			if (!flow->counter) {
				flow->counter =
					flow_dv_translate_create_counter(dev,
						dev_flow, sub_actions->conf,
						0);
				if (!flow->counter)
					return rte_flow_error_set
						(error, rte_errno,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"cannot create counter"
						" object.");
			}
			sample_act->dr_cnt_action =
				  (flow_dv_counter_get_by_idx(dev,
				  flow->counter, NULL))->action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_cnt_action;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
		{
			struct mlx5_flow_dv_port_id_action_resource
					port_id_resource;
			uint32_t port_id = 0;

			memset(&port_id_resource, 0, sizeof(port_id_resource));
			/* Save the port id resource before sample */
			pre_rix = dev_flow->handle->rix_port_id_action;
			pre_r = dev_flow->dv.port_id_action;
			if (flow_dv_translate_action_port_id(dev, sub_actions,
							     &port_id, error))
				return -rte_errno;
			port_id_resource.port_id = port_id;
			if (flow_dv_port_id_action_resource_register
			    (dev, &port_id_resource, dev_flow, error))
				return -rte_errno;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			sample_idx->rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_port_id_action;
			/* Recover the port id resource after sample */
			dev_flow->dv.port_id_action = pre_r;
			dev_flow->handle->rix_port_id_action = pre_rix;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* Save the encap resource before sample */
			pre_rix = dev_flow->handle->dvh.rix_encap_decap;
			pre_r = dev_flow->dv.encap_decap;
			if (flow_dv_create_action_l2_encap(dev, sub_actions,
							   dev_flow,
							   attr->transfer,
							   error))
				return -rte_errno;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			sample_idx->rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_encap_action;
			/* Recover the encap resource after sample */
			dev_flow->dv.encap_decap = pre_r;
			dev_flow->handle->dvh.rix_encap_decap = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"Not support for sampler action");
		}
	}
	sample_act->action_flags = action_flags;
	res->ft_id = dev_flow->dv.group;
	if (attr->transfer) {
		/* FDB: restore the vport metadata tag in register C0 for the
		 * sampled packet via a SET modify-header action.
		 */
		union {
			uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
			uint64_t set_action;
		} action_ctx = { .set_action = 0 };

		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
		MLX5_SET(set_action_in, action_ctx.action_in, action_type,
			 MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, action_ctx.action_in, field,
			 MLX5_MODI_META_REG_C_0);
		MLX5_SET(set_action_in, action_ctx.action_in, data,
			 priv->vport_meta_tag);
		res->set_action = action_ctx.set_action;
	} else if (attr->ingress) {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	} else {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
	}
	return 0;
}
11778
/**
 * Create the sample action resource for the flow.
 *
 * With a single destination the sample resource is registered directly.
 * With multiple destinations (mirroring) the normal-path fate actions are
 * moved into the last slot of the destination array and a dest-array
 * resource is registered instead.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] num_of_dest
 *   The num of destination.
 * @param[in, out] res
 *   Pointer to sample resource.
 * @param[in, out] mdest_res
 *   Pointer to destination array resource.
 * @param[in] sample_actions
 *   Pointer to sample path actions list.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
			     struct mlx5_flow *dev_flow,
			     uint32_t num_of_dest,
			     struct mlx5_flow_dv_sample_resource *res,
			     struct mlx5_flow_dv_dest_array_resource *mdest_res,
			     void **sample_actions,
			     uint64_t action_flags,
			     struct rte_flow_error *error)
{
	/* update normal path action resource into last index of array */
	uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
	struct mlx5_flow_sub_actions_list *sample_act =
					&mdest_res->sample_act[dest_index];
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	uint32_t normal_idx = 0;
	struct mlx5_hrxq *hrxq;
	uint32_t hrxq_idx;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (num_of_dest > 1) {
		/*
		 * Mirroring: hand over the normal-path fate resources
		 * (queue/encap/port-id/jump) to the dest-array entry and
		 * clear them from the flow handle so ownership is single.
		 */
		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
			/* Handle QP action for mirroring */
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
				     (error, rte_errno,
				      RTE_FLOW_ERROR_TYPE_ACTION,
				      NULL,
				      "cannot create rx queue");
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
			sample_act->dr_queue_action = hrxq->action;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			dev_flow->handle->dvh.rix_encap_decap = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			dev_flow->handle->rix_port_id_action = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_jump =
				dev_flow->handle->rix_jump;
			sample_act->dr_jump_action =
				dev_flow->dv.jump->action;
			dev_flow->handle->rix_jump = 0;
		}
		sample_act->actions_num = normal_idx;
		/* update sample action resource into first index of array */
		mdest_res->ft_type = res->ft_type;
		memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
				sizeof(struct mlx5_flow_sub_actions_idx));
		memcpy(&mdest_res->sample_act[0], &res->sample_act,
				sizeof(struct mlx5_flow_sub_actions_list));
		mdest_res->num_of_dest = num_of_dest;
		if (flow_dv_dest_array_resource_register(dev, mdest_res,
							 dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "can't create sample "
						  "action");
	} else {
		/* Single destination: register a plain sample resource. */
		res->sub_actions = sample_actions;
		if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "can't create sample action");
	}
	return 0;
}
11890
/**
 * Remove an ASO age action from age actions list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] age
 *   Pointer to the aso age action handler.
 */
static void
flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
				struct mlx5_aso_age_action *age)
{
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param = &age->age_params;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t expected = AGE_CANDIDATE;

	age_info = GET_PORT_AGE_INFO(priv);
	/*
	 * Fast path: if the action is still AGE_CANDIDATE a single CAS moves
	 * it straight to AGE_FREE. If the CAS fails the action has left the
	 * candidate state (presumably aged out and linked into the aged
	 * list), so it is unlinked under the lock below.
	 */
	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
					 AGE_FREE, false, __ATOMIC_RELAXED,
					 __ATOMIC_RELAXED)) {
		/**
		 * We need the lock even if the age timed out,
		 * since the age action may still be in process.
		 */
		rte_spinlock_lock(&age_info->aged_sl);
		LIST_REMOVE(age, next);
		rte_spinlock_unlock(&age_info->aged_sl);
		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
	}
}
11922
11923 /**
11924  * Release an ASO age action.
11925  *
11926  * @param[in] dev
11927  *   Pointer to the Ethernet device structure.
 * @param[in] age_idx
 *   Index of ASO age action to release.
11933  *
11934  * @return
11935  *   0 when age action was removed, otherwise the number of references.
11936  */
static int
flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
	struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
	/* Drop one reference; the action is recycled when it reaches zero. */
	uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);

	if (!ret) {
		/* Unlink from the aged list (if present) and mark AGE_FREE. */
		flow_dv_aso_age_remove_from_age(dev, age);
		/* Return the action to the manager's free list for reuse. */
		rte_spinlock_lock(&mng->free_sl);
		LIST_INSERT_HEAD(&mng->free, age, next);
		rte_spinlock_unlock(&mng->free_sl);
	}
	return ret;
}
11953
11954 /**
11955  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11956  *
11957  * @param[in] dev
11958  *   Pointer to the Ethernet device structure.
11959  *
11960  * @return
11961  *   0 on success, otherwise negative errno value and rte_errno is set.
11962  */
11963 static int
11964 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11965 {
11966         struct mlx5_priv *priv = dev->data->dev_private;
11967         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11968         void *old_pools = mng->pools;
11969         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11970         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11971         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11972
11973         if (!pools) {
11974                 rte_errno = ENOMEM;
11975                 return -ENOMEM;
11976         }
11977         if (old_pools) {
11978                 memcpy(pools, old_pools,
11979                        mng->n * sizeof(struct mlx5_flow_counter_pool *));
11980                 mlx5_free(old_pools);
11981         } else {
11982                 /* First ASO flow hit allocation - starting ASO data-path. */
11983                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11984
11985                 if (ret) {
11986                         mlx5_free(pools);
11987                         return ret;
11988                 }
11989         }
11990         mng->n = resize;
11991         mng->pools = pools;
11992         return 0;
11993 }
11994
11995 /**
11996  * Create and initialize a new ASO aging pool.
11997  *
11998  * @param[in] dev
11999  *   Pointer to the Ethernet device structure.
12000  * @param[out] age_free
12001  *   Where to put the pointer of a new age action.
12002  *
12003  * @return
12004  *   The age actions pool pointer and @p age_free is set on success,
12005  *   NULL otherwise and rte_errno is set.
12006  */
12007 static struct mlx5_aso_age_pool *
12008 flow_dv_age_pool_create(struct rte_eth_dev *dev,
12009                         struct mlx5_aso_age_action **age_free)
12010 {
12011         struct mlx5_priv *priv = dev->data->dev_private;
12012         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12013         struct mlx5_aso_age_pool *pool = NULL;
12014         struct mlx5_devx_obj *obj = NULL;
12015         uint32_t i;
12016
12017         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
12018                                                     priv->sh->cdev->pdn);
12019         if (!obj) {
12020                 rte_errno = ENODATA;
12021                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
12022                 return NULL;
12023         }
12024         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12025         if (!pool) {
12026                 claim_zero(mlx5_devx_cmd_destroy(obj));
12027                 rte_errno = ENOMEM;
12028                 return NULL;
12029         }
12030         pool->flow_hit_aso_obj = obj;
12031         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
12032         rte_rwlock_write_lock(&mng->resize_rwl);
12033         pool->index = mng->next;
12034         /* Resize pools array if there is no room for the new pool in it. */
12035         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
12036                 claim_zero(mlx5_devx_cmd_destroy(obj));
12037                 mlx5_free(pool);
12038                 rte_rwlock_write_unlock(&mng->resize_rwl);
12039                 return NULL;
12040         }
12041         mng->pools[pool->index] = pool;
12042         mng->next++;
12043         rte_rwlock_write_unlock(&mng->resize_rwl);
12044         /* Assign the first action in the new pool, the rest go to free list. */
12045         *age_free = &pool->actions[0];
12046         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
12047                 pool->actions[i].offset = i;
12048                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
12049         }
12050         return pool;
12051 }
12052
12053 /**
12054  * Allocate a ASO aging bit.
12055  *
12056  * @param[in] dev
12057  *   Pointer to the Ethernet device structure.
12058  * @param[out] error
12059  *   Pointer to the error structure.
12060  *
12061  * @return
12062  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
12063  */
12064 static uint32_t
12065 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12066 {
12067         struct mlx5_priv *priv = dev->data->dev_private;
12068         const struct mlx5_aso_age_pool *pool;
12069         struct mlx5_aso_age_action *age_free = NULL;
12070         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12071
12072         MLX5_ASSERT(mng);
12073         /* Try to get the next free age action bit. */
12074         rte_spinlock_lock(&mng->free_sl);
12075         age_free = LIST_FIRST(&mng->free);
12076         if (age_free) {
12077                 LIST_REMOVE(age_free, next);
12078         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
12079                 rte_spinlock_unlock(&mng->free_sl);
12080                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12081                                    NULL, "failed to create ASO age pool");
12082                 return 0; /* 0 is an error. */
12083         }
12084         rte_spinlock_unlock(&mng->free_sl);
12085         pool = container_of
12086           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
12087                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
12088                                                                        actions);
12089         if (!age_free->dr_action) {
12090                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
12091                                                  error);
12092
12093                 if (reg_c < 0) {
12094                         rte_flow_error_set(error, rte_errno,
12095                                            RTE_FLOW_ERROR_TYPE_ACTION,
12096                                            NULL, "failed to get reg_c "
12097                                            "for ASO flow hit");
12098                         return 0; /* 0 is an error. */
12099                 }
12100 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
12101                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
12102                                 (priv->sh->rx_domain,
12103                                  pool->flow_hit_aso_obj->obj, age_free->offset,
12104                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
12105                                  (reg_c - REG_C_0));
12106 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
12107                 if (!age_free->dr_action) {
12108                         rte_errno = errno;
12109                         rte_spinlock_lock(&mng->free_sl);
12110                         LIST_INSERT_HEAD(&mng->free, age_free, next);
12111                         rte_spinlock_unlock(&mng->free_sl);
12112                         rte_flow_error_set(error, rte_errno,
12113                                            RTE_FLOW_ERROR_TYPE_ACTION,
12114                                            NULL, "failed to create ASO "
12115                                            "flow hit action");
12116                         return 0; /* 0 is an error. */
12117                 }
12118         }
12119         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
12120         return pool->index | ((age_free->offset + 1) << 16);
12121 }
12122
12123 /**
12124  * Initialize flow ASO age parameters.
12125  *
12126  * @param[in] dev
12127  *   Pointer to rte_eth_dev structure.
12128  * @param[in] age_idx
12129  *   Index of ASO age action.
12130  * @param[in] context
12131  *   Pointer to flow counter age context.
12132  * @param[in] timeout
12133  *   Aging timeout in seconds.
12134  *
12135  */
12136 static void
12137 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12138                             uint32_t age_idx,
12139                             void *context,
12140                             uint32_t timeout)
12141 {
12142         struct mlx5_aso_age_action *aso_age;
12143
12144         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12145         MLX5_ASSERT(aso_age);
12146         aso_age->age_params.context = context;
12147         aso_age->age_params.timeout = timeout;
12148         aso_age->age_params.port_id = dev->data->port_id;
12149         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12150                          __ATOMIC_RELAXED);
12151         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12152                          __ATOMIC_RELAXED);
12153 }
12154
12155 static void
12156 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12157                                const struct rte_flow_item_integrity *value,
12158                                void *headers_m, void *headers_v)
12159 {
12160         if (mask->l4_ok) {
12161                 /* RTE l4_ok filter aggregates hardware l4_ok and
12162                  * l4_checksum_ok filters.
12163                  * Positive RTE l4_ok match requires hardware match on both L4
12164                  * hardware integrity bits.
12165                  * For negative match, check hardware l4_checksum_ok bit only,
12166                  * because hardware sets that bit to 0 for all packets
12167                  * with bad L4.
12168                  */
12169                 if (value->l4_ok) {
12170                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
12171                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
12172                 }
12173                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12174                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12175                          !!value->l4_ok);
12176         }
12177         if (mask->l4_csum_ok) {
12178                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
12179                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12180                          value->l4_csum_ok);
12181         }
12182 }
12183
12184 static void
12185 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12186                                const struct rte_flow_item_integrity *value,
12187                                void *headers_m, void *headers_v, bool is_ipv4)
12188 {
12189         if (mask->l3_ok) {
12190                 /* RTE l3_ok filter aggregates for IPv4 hardware l3_ok and
12191                  * ipv4_csum_ok filters.
12192                  * Positive RTE l3_ok match requires hardware match on both L3
12193                  * hardware integrity bits.
12194                  * For negative match, check hardware l3_csum_ok bit only,
12195                  * because hardware sets that bit to 0 for all packets
12196                  * with bad L3.
12197                  */
12198                 if (is_ipv4) {
12199                         if (value->l3_ok) {
12200                                 MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12201                                          l3_ok, 1);
12202                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12203                                          l3_ok, 1);
12204                         }
12205                         MLX5_SET(fte_match_set_lyr_2_4, headers_m,
12206                                  ipv4_checksum_ok, 1);
12207                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12208                                  ipv4_checksum_ok, !!value->l3_ok);
12209                 } else {
12210                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
12211                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12212                                  value->l3_ok);
12213                 }
12214         }
12215         if (mask->ipv4_csum_ok) {
12216                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
12217                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12218                          value->ipv4_csum_ok);
12219         }
12220 }
12221
12222 static void
12223 set_integrity_bits(void *headers_m, void *headers_v,
12224                    const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12225 {
12226         const struct rte_flow_item_integrity *spec = integrity_item->spec;
12227         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12228
12229         /* Integrity bits validation cleared spec pointer */
12230         MLX5_ASSERT(spec != NULL);
12231         if (!mask)
12232                 mask = &rte_flow_item_integrity_mask;
12233         flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12234                                        is_l3_ip4);
12235         flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12236 }
12237
12238 static void
12239 flow_dv_translate_item_integrity_post(void *matcher, void *key,
12240                                       const
12241                                       struct rte_flow_item *integrity_items[2],
12242                                       uint64_t pattern_flags)
12243 {
12244         void *headers_m, *headers_v;
12245         bool is_l3_ip4;
12246
12247         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12248                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12249                                          inner_headers);
12250                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12251                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12252                             0;
12253                 set_integrity_bits(headers_m, headers_v,
12254                                    integrity_items[1], is_l3_ip4);
12255         }
12256         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12257                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12258                                          outer_headers);
12259                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12260                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12261                             0;
12262                 set_integrity_bits(headers_m, headers_v,
12263                                    integrity_items[0], is_l3_ip4);
12264         }
12265 }
12266
12267 static void
12268 flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12269                                  const struct rte_flow_item *integrity_items[2],
12270                                  uint64_t *last_item)
12271 {
12272         const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12273
12274         /* integrity bits validation cleared spec pointer */
12275         MLX5_ASSERT(spec != NULL);
12276         if (spec->level > 1) {
12277                 integrity_items[1] = item;
12278                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12279         } else {
12280                 integrity_items[0] = item;
12281                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12282         }
12283 }
12284
12285 /**
12286  * Prepares DV flow counter with aging configuration.
12287  * Gets it by index when exists, creates a new one when doesn't.
12288  *
12289  * @param[in] dev
12290  *   Pointer to rte_eth_dev structure.
12291  * @param[in] dev_flow
12292  *   Pointer to the mlx5_flow.
12293  * @param[in, out] flow
12294  *   Pointer to the sub flow.
12295  * @param[in] count
12296  *   Pointer to the counter action configuration.
12297  * @param[in] age
12298  *   Pointer to the aging action configuration.
12299  * @param[out] error
12300  *   Pointer to the error structure.
12301  *
12302  * @return
12303  *   Pointer to the counter, NULL otherwise.
12304  */
12305 static struct mlx5_flow_counter *
12306 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12307                         struct mlx5_flow *dev_flow,
12308                         struct rte_flow *flow,
12309                         const struct rte_flow_action_count *count,
12310                         const struct rte_flow_action_age *age,
12311                         struct rte_flow_error *error)
12312 {
12313         if (!flow->counter) {
12314                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12315                                                                  count, age);
12316                 if (!flow->counter) {
12317                         rte_flow_error_set(error, rte_errno,
12318                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12319                                            "cannot create counter object.");
12320                         return NULL;
12321                 }
12322         }
12323         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12324 }
12325
12326 /*
12327  * Release an ASO CT action by its own device.
12328  *
12329  * @param[in] dev
12330  *   Pointer to the Ethernet device structure.
12331  * @param[in] idx
12332  *   Index of ASO CT action to release.
12333  *
12334  * @return
12335  *   0 when CT action was removed, otherwise the number of references.
12336  */
12337 static inline int
12338 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12339 {
12340         struct mlx5_priv *priv = dev->data->dev_private;
12341         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12342         uint32_t ret;
12343         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12344         enum mlx5_aso_ct_state state =
12345                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12346
12347         /* Cannot release when CT is in the ASO SQ. */
12348         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12349                 return -1;
12350         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12351         if (!ret) {
12352                 if (ct->dr_action_orig) {
12353 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12354                         claim_zero(mlx5_glue->destroy_flow_action
12355                                         (ct->dr_action_orig));
12356 #endif
12357                         ct->dr_action_orig = NULL;
12358                 }
12359                 if (ct->dr_action_rply) {
12360 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12361                         claim_zero(mlx5_glue->destroy_flow_action
12362                                         (ct->dr_action_rply));
12363 #endif
12364                         ct->dr_action_rply = NULL;
12365                 }
12366                 /* Clear the state to free, no need in 1st allocation. */
12367                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12368                 rte_spinlock_lock(&mng->ct_sl);
12369                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12370                 rte_spinlock_unlock(&mng->ct_sl);
12371         }
12372         return (int)ret;
12373 }
12374
12375 static inline int
12376 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12377                        struct rte_flow_error *error)
12378 {
12379         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12380         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12381         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12382         int ret;
12383
12384         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12385         if (dev->data->dev_started != 1)
12386                 return rte_flow_error_set(error, EAGAIN,
12387                                           RTE_FLOW_ERROR_TYPE_ACTION,
12388                                           NULL,
12389                                           "Indirect CT action cannot be destroyed when the port is stopped");
12390         ret = flow_dv_aso_ct_dev_release(owndev, idx);
12391         if (ret < 0)
12392                 return rte_flow_error_set(error, EAGAIN,
12393                                           RTE_FLOW_ERROR_TYPE_ACTION,
12394                                           NULL,
12395                                           "Current state prevents indirect CT action from being destroyed");
12396         return ret;
12397 }
12398
12399 /*
12400  * Resize the ASO CT pools array by 64 pools.
12401  *
12402  * @param[in] dev
12403  *   Pointer to the Ethernet device structure.
12404  *
12405  * @return
12406  *   0 on success, otherwise negative errno value and rte_errno is set.
12407  */
12408 static int
12409 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12410 {
12411         struct mlx5_priv *priv = dev->data->dev_private;
12412         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12413         void *old_pools = mng->pools;
12414         /* Magic number now, need a macro. */
12415         uint32_t resize = mng->n + 64;
12416         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12417         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12418
12419         if (!pools) {
12420                 rte_errno = ENOMEM;
12421                 return -rte_errno;
12422         }
12423         rte_rwlock_write_lock(&mng->resize_rwl);
12424         /* ASO SQ/QP was already initialized in the startup. */
12425         if (old_pools) {
12426                 /* Realloc could be an alternative choice. */
12427                 rte_memcpy(pools, old_pools,
12428                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12429                 mlx5_free(old_pools);
12430         }
12431         mng->n = resize;
12432         mng->pools = pools;
12433         rte_rwlock_write_unlock(&mng->resize_rwl);
12434         return 0;
12435 }
12436
12437 /*
12438  * Create and initialize a new ASO CT pool.
12439  *
12440  * @param[in] dev
12441  *   Pointer to the Ethernet device structure.
12442  * @param[out] ct_free
12443  *   Where to put the pointer of a new CT action.
12444  *
12445  * @return
12446  *   The CT actions pool pointer and @p ct_free is set on success,
12447  *   NULL otherwise and rte_errno is set.
12448  */
12449 static struct mlx5_aso_ct_pool *
12450 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12451                        struct mlx5_aso_ct_action **ct_free)
12452 {
12453         struct mlx5_priv *priv = dev->data->dev_private;
12454         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12455         struct mlx5_aso_ct_pool *pool = NULL;
12456         struct mlx5_devx_obj *obj = NULL;
12457         uint32_t i;
12458         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12459
12460         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
12461                                                           priv->sh->cdev->pdn,
12462                                                           log_obj_size);
12463         if (!obj) {
12464                 rte_errno = ENODATA;
12465                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12466                 return NULL;
12467         }
12468         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12469         if (!pool) {
12470                 rte_errno = ENOMEM;
12471                 claim_zero(mlx5_devx_cmd_destroy(obj));
12472                 return NULL;
12473         }
12474         pool->devx_obj = obj;
12475         pool->index = mng->next;
12476         /* Resize pools array if there is no room for the new pool in it. */
12477         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12478                 claim_zero(mlx5_devx_cmd_destroy(obj));
12479                 mlx5_free(pool);
12480                 return NULL;
12481         }
12482         mng->pools[pool->index] = pool;
12483         mng->next++;
12484         /* Assign the first action in the new pool, the rest go to free list. */
12485         *ct_free = &pool->actions[0];
12486         /* Lock outside, the list operation is safe here. */
12487         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12488                 /* refcnt is 0 when allocating the memory. */
12489                 pool->actions[i].offset = i;
12490                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12491         }
12492         return pool;
12493 }
12494
12495 /*
12496  * Allocate a ASO CT action from free list.
12497  *
12498  * @param[in] dev
12499  *   Pointer to the Ethernet device structure.
12500  * @param[out] error
12501  *   Pointer to the error structure.
12502  *
12503  * @return
12504  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12505  */
12506 static uint32_t
12507 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12508 {
12509         struct mlx5_priv *priv = dev->data->dev_private;
12510         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12511         struct mlx5_aso_ct_action *ct = NULL;
12512         struct mlx5_aso_ct_pool *pool;
12513         uint8_t reg_c;
12514         uint32_t ct_idx;
12515
12516         MLX5_ASSERT(mng);
12517         if (!priv->sh->devx) {
12518                 rte_errno = ENOTSUP;
12519                 return 0;
12520         }
12521         /* Get a free CT action, if no, a new pool will be created. */
12522         rte_spinlock_lock(&mng->ct_sl);
12523         ct = LIST_FIRST(&mng->free_cts);
12524         if (ct) {
12525                 LIST_REMOVE(ct, next);
12526         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12527                 rte_spinlock_unlock(&mng->ct_sl);
12528                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12529                                    NULL, "failed to create ASO CT pool");
12530                 return 0;
12531         }
12532         rte_spinlock_unlock(&mng->ct_sl);
12533         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12534         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12535         /* 0: inactive, 1: created, 2+: used by flows. */
12536         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12537         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
12538         if (!ct->dr_action_orig) {
12539 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12540                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12541                         (priv->sh->rx_domain, pool->devx_obj->obj,
12542                          ct->offset,
12543                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12544                          reg_c - REG_C_0);
12545 #else
12546                 RTE_SET_USED(reg_c);
12547 #endif
12548                 if (!ct->dr_action_orig) {
12549                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12550                         rte_flow_error_set(error, rte_errno,
12551                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12552                                            "failed to create ASO CT action");
12553                         return 0;
12554                 }
12555         }
12556         if (!ct->dr_action_rply) {
12557 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12558                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12559                         (priv->sh->rx_domain, pool->devx_obj->obj,
12560                          ct->offset,
12561                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12562                          reg_c - REG_C_0);
12563 #endif
12564                 if (!ct->dr_action_rply) {
12565                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12566                         rte_flow_error_set(error, rte_errno,
12567                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12568                                            "failed to create ASO CT action");
12569                         return 0;
12570                 }
12571         }
12572         return ct_idx;
12573 }
12574
12575 /*
12576  * Create a conntrack object with context and actions by using ASO mechanism.
12577  *
12578  * @param[in] dev
12579  *   Pointer to rte_eth_dev structure.
12580  * @param[in] pro
12581  *   Pointer to conntrack information profile.
12582  * @param[out] error
12583  *   Pointer to the error structure.
12584  *
12585  * @return
12586  *   Index to conntrack object on success, 0 otherwise.
12587  */
12588 static uint32_t
12589 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12590                                    const struct rte_flow_action_conntrack *pro,
12591                                    struct rte_flow_error *error)
12592 {
12593         struct mlx5_priv *priv = dev->data->dev_private;
12594         struct mlx5_dev_ctx_shared *sh = priv->sh;
12595         struct mlx5_aso_ct_action *ct;
12596         uint32_t idx;
12597
12598         if (!sh->ct_aso_en)
12599                 return rte_flow_error_set(error, ENOTSUP,
12600                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12601                                           "Connection is not supported");
12602         idx = flow_dv_aso_ct_alloc(dev, error);
12603         if (!idx)
12604                 return rte_flow_error_set(error, rte_errno,
12605                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12606                                           "Failed to allocate CT object");
12607         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12608         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12609                 return rte_flow_error_set(error, EBUSY,
12610                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12611                                           "Failed to update CT");
12612         ct->is_original = !!pro->is_original_dir;
12613         ct->peer = pro->peer_port;
12614         return idx;
12615 }
12616
12617 /**
12618  * Fill the flow with DV spec, lock free
12619  * (mutex should be acquired by caller).
12620  *
12621  * @param[in] dev
12622  *   Pointer to rte_eth_dev structure.
12623  * @param[in, out] dev_flow
12624  *   Pointer to the sub flow.
12625  * @param[in] attr
12626  *   Pointer to the flow attributes.
12627  * @param[in] items
12628  *   Pointer to the list of items.
12629  * @param[in] actions
12630  *   Pointer to the list of actions.
12631  * @param[out] error
12632  *   Pointer to the error structure.
12633  *
12634  * @return
12635  *   0 on success, a negative errno value otherwise and rte_errno is set.
12636  */
12637 static int
12638 flow_dv_translate(struct rte_eth_dev *dev,
12639                   struct mlx5_flow *dev_flow,
12640                   const struct rte_flow_attr *attr,
12641                   const struct rte_flow_item items[],
12642                   const struct rte_flow_action actions[],
12643                   struct rte_flow_error *error)
12644 {
12645         struct mlx5_priv *priv = dev->data->dev_private;
12646         struct mlx5_dev_config *dev_conf = &priv->config;
12647         struct rte_flow *flow = dev_flow->flow;
12648         struct mlx5_flow_handle *handle = dev_flow->handle;
12649         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12650         struct mlx5_flow_rss_desc *rss_desc;
12651         uint64_t item_flags = 0;
12652         uint64_t last_item = 0;
12653         uint64_t action_flags = 0;
12654         struct mlx5_flow_dv_matcher matcher = {
12655                 .mask = {
12656                         .size = sizeof(matcher.mask.buf),
12657                 },
12658         };
12659         int actions_n = 0;
12660         bool actions_end = false;
12661         union {
12662                 struct mlx5_flow_dv_modify_hdr_resource res;
12663                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12664                             sizeof(struct mlx5_modification_cmd) *
12665                             (MLX5_MAX_MODIFY_NUM + 1)];
12666         } mhdr_dummy;
12667         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12668         const struct rte_flow_action_count *count = NULL;
12669         const struct rte_flow_action_age *non_shared_age = NULL;
12670         union flow_dv_attr flow_attr = { .attr = 0 };
12671         uint32_t tag_be;
12672         union mlx5_flow_tbl_key tbl_key;
12673         uint32_t modify_action_position = UINT32_MAX;
12674         void *match_mask = matcher.mask.buf;
12675         void *match_value = dev_flow->dv.value.buf;
12676         uint8_t next_protocol = 0xff;
12677         struct rte_vlan_hdr vlan = { 0 };
12678         struct mlx5_flow_dv_dest_array_resource mdest_res;
12679         struct mlx5_flow_dv_sample_resource sample_res;
12680         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12681         const struct rte_flow_action_sample *sample = NULL;
12682         struct mlx5_flow_sub_actions_list *sample_act;
12683         uint32_t sample_act_pos = UINT32_MAX;
12684         uint32_t age_act_pos = UINT32_MAX;
12685         uint32_t num_of_dest = 0;
12686         int tmp_actions_n = 0;
12687         uint32_t table;
12688         int ret = 0;
12689         const struct mlx5_flow_tunnel *tunnel = NULL;
12690         struct flow_grp_info grp_info = {
12691                 .external = !!dev_flow->external,
12692                 .transfer = !!attr->transfer,
12693                 .fdb_def_rule = !!priv->fdb_def_rule,
12694                 .skip_scale = dev_flow->skip_scale &
12695                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12696                 .std_tbl_fix = true,
12697         };
12698         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
12699         const struct rte_flow_item *tunnel_item = NULL;
12700
12701         if (!wks)
12702                 return rte_flow_error_set(error, ENOMEM,
12703                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12704                                           NULL,
12705                                           "failed to push flow workspace");
12706         rss_desc = &wks->rss_desc;
12707         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12708         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12709         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12710                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12711         /* update normal path action resource into last index of array */
12712         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12713         if (is_tunnel_offload_active(dev)) {
12714                 if (dev_flow->tunnel) {
12715                         RTE_VERIFY(dev_flow->tof_type ==
12716                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12717                         tunnel = dev_flow->tunnel;
12718                 } else {
12719                         tunnel = mlx5_get_tof(items, actions,
12720                                               &dev_flow->tof_type);
12721                         dev_flow->tunnel = tunnel;
12722                 }
12723                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12724                                         (dev, attr, tunnel, dev_flow->tof_type);
12725         }
12726         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12727                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12728         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12729                                        &grp_info, error);
12730         if (ret)
12731                 return ret;
12732         dev_flow->dv.group = table;
12733         if (attr->transfer)
12734                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12735         /* number of actions must be set to 0 in case of dirty stack. */
12736         mhdr_res->actions_num = 0;
12737         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12738                 /*
12739                  * do not add decap action if match rule drops packet
12740                  * HW rejects rules with decap & drop
12741                  *
12742                  * if tunnel match rule was inserted before matching tunnel set
12743                  * rule flow table used in the match rule must be registered.
12744                  * current implementation handles that in the
12745                  * flow_dv_match_register() at the function end.
12746                  */
12747                 bool add_decap = true;
12748                 const struct rte_flow_action *ptr = actions;
12749
12750                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12751                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12752                                 add_decap = false;
12753                                 break;
12754                         }
12755                 }
12756                 if (add_decap) {
12757                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12758                                                            attr->transfer,
12759                                                            error))
12760                                 return -rte_errno;
12761                         dev_flow->dv.actions[actions_n++] =
12762                                         dev_flow->dv.encap_decap->action;
12763                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12764                 }
12765         }
12766         for (; !actions_end ; actions++) {
12767                 const struct rte_flow_action_queue *queue;
12768                 const struct rte_flow_action_rss *rss;
12769                 const struct rte_flow_action *action = actions;
12770                 const uint8_t *rss_key;
12771                 struct mlx5_flow_tbl_resource *tbl;
12772                 struct mlx5_aso_age_action *age_act;
12773                 struct mlx5_flow_counter *cnt_act;
12774                 uint32_t port_id = 0;
12775                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12776                 int action_type = actions->type;
12777                 const struct rte_flow_action *found_action = NULL;
12778                 uint32_t jump_group = 0;
12779                 uint32_t owner_idx;
12780                 struct mlx5_aso_ct_action *ct;
12781
12782                 if (!mlx5_flow_os_action_supported(action_type))
12783                         return rte_flow_error_set(error, ENOTSUP,
12784                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12785                                                   actions,
12786                                                   "action not supported");
12787                 switch (action_type) {
12788                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12789                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12790                         break;
12791                 case RTE_FLOW_ACTION_TYPE_VOID:
12792                         break;
12793                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12794                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12795                         if (flow_dv_translate_action_port_id(dev, action,
12796                                                              &port_id, error))
12797                                 return -rte_errno;
12798                         port_id_resource.port_id = port_id;
12799                         MLX5_ASSERT(!handle->rix_port_id_action);
12800                         if (flow_dv_port_id_action_resource_register
12801                             (dev, &port_id_resource, dev_flow, error))
12802                                 return -rte_errno;
12803                         dev_flow->dv.actions[actions_n++] =
12804                                         dev_flow->dv.port_id_action->action;
12805                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12806                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12807                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12808                         num_of_dest++;
12809                         break;
12810                 case RTE_FLOW_ACTION_TYPE_FLAG:
12811                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12812                         wks->mark = 1;
12813                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12814                                 struct rte_flow_action_mark mark = {
12815                                         .id = MLX5_FLOW_MARK_DEFAULT,
12816                                 };
12817
12818                                 if (flow_dv_convert_action_mark(dev, &mark,
12819                                                                 mhdr_res,
12820                                                                 error))
12821                                         return -rte_errno;
12822                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12823                                 break;
12824                         }
12825                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12826                         /*
12827                          * Only one FLAG or MARK is supported per device flow
12828                          * right now. So the pointer to the tag resource must be
12829                          * zero before the register process.
12830                          */
12831                         MLX5_ASSERT(!handle->dvh.rix_tag);
12832                         if (flow_dv_tag_resource_register(dev, tag_be,
12833                                                           dev_flow, error))
12834                                 return -rte_errno;
12835                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12836                         dev_flow->dv.actions[actions_n++] =
12837                                         dev_flow->dv.tag_resource->action;
12838                         break;
12839                 case RTE_FLOW_ACTION_TYPE_MARK:
12840                         action_flags |= MLX5_FLOW_ACTION_MARK;
12841                         wks->mark = 1;
12842                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12843                                 const struct rte_flow_action_mark *mark =
12844                                         (const struct rte_flow_action_mark *)
12845                                                 actions->conf;
12846
12847                                 if (flow_dv_convert_action_mark(dev, mark,
12848                                                                 mhdr_res,
12849                                                                 error))
12850                                         return -rte_errno;
12851                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12852                                 break;
12853                         }
12854                         /* Fall-through */
12855                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12856                         /* Legacy (non-extensive) MARK action. */
12857                         tag_be = mlx5_flow_mark_set
12858                               (((const struct rte_flow_action_mark *)
12859                                (actions->conf))->id);
12860                         MLX5_ASSERT(!handle->dvh.rix_tag);
12861                         if (flow_dv_tag_resource_register(dev, tag_be,
12862                                                           dev_flow, error))
12863                                 return -rte_errno;
12864                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12865                         dev_flow->dv.actions[actions_n++] =
12866                                         dev_flow->dv.tag_resource->action;
12867                         break;
12868                 case RTE_FLOW_ACTION_TYPE_SET_META:
12869                         if (flow_dv_convert_action_set_meta
12870                                 (dev, mhdr_res, attr,
12871                                  (const struct rte_flow_action_set_meta *)
12872                                   actions->conf, error))
12873                                 return -rte_errno;
12874                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12875                         break;
12876                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12877                         if (flow_dv_convert_action_set_tag
12878                                 (dev, mhdr_res,
12879                                  (const struct rte_flow_action_set_tag *)
12880                                   actions->conf, error))
12881                                 return -rte_errno;
12882                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12883                         break;
12884                 case RTE_FLOW_ACTION_TYPE_DROP:
12885                         action_flags |= MLX5_FLOW_ACTION_DROP;
12886                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12887                         break;
12888                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12889                         queue = actions->conf;
12890                         rss_desc->queue_num = 1;
12891                         rss_desc->queue[0] = queue->index;
12892                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12893                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12894                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12895                         num_of_dest++;
12896                         break;
12897                 case RTE_FLOW_ACTION_TYPE_RSS:
12898                         rss = actions->conf;
12899                         memcpy(rss_desc->queue, rss->queue,
12900                                rss->queue_num * sizeof(uint16_t));
12901                         rss_desc->queue_num = rss->queue_num;
12902                         /* NULL RSS key indicates default RSS key. */
12903                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12904                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12905                         /*
12906                          * rss->level and rss.types should be set in advance
12907                          * when expanding items for RSS.
12908                          */
12909                         action_flags |= MLX5_FLOW_ACTION_RSS;
12910                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12911                                 MLX5_FLOW_FATE_SHARED_RSS :
12912                                 MLX5_FLOW_FATE_QUEUE;
12913                         break;
12914                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12915                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12916                         age_act = flow_aso_age_get_by_idx(dev, owner_idx);
12917                         if (flow->age == 0) {
12918                                 flow->age = owner_idx;
12919                                 __atomic_fetch_add(&age_act->refcnt, 1,
12920                                                    __ATOMIC_RELAXED);
12921                         }
12922                         age_act_pos = actions_n++;
12923                         action_flags |= MLX5_FLOW_ACTION_AGE;
12924                         break;
12925                 case RTE_FLOW_ACTION_TYPE_AGE:
12926                         non_shared_age = action->conf;
12927                         age_act_pos = actions_n++;
12928                         action_flags |= MLX5_FLOW_ACTION_AGE;
12929                         break;
12930                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12931                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12932                         cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
12933                                                              NULL);
12934                         MLX5_ASSERT(cnt_act != NULL);
12935                         /**
12936                          * When creating meter drop flow in drop table, the
12937                          * counter should not overwrite the rte flow counter.
12938                          */
12939                         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
12940                             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
12941                                 dev_flow->dv.actions[actions_n++] =
12942                                                         cnt_act->action;
12943                         } else {
12944                                 if (flow->counter == 0) {
12945                                         flow->counter = owner_idx;
12946                                         __atomic_fetch_add
12947                                                 (&cnt_act->shared_info.refcnt,
12948                                                  1, __ATOMIC_RELAXED);
12949                                 }
12950                                 /* Save information first, will apply later. */
12951                                 action_flags |= MLX5_FLOW_ACTION_COUNT;
12952                         }
12953                         break;
12954                 case RTE_FLOW_ACTION_TYPE_COUNT:
12955                         if (!priv->sh->devx) {
12956                                 return rte_flow_error_set
12957                                               (error, ENOTSUP,
12958                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12959                                                NULL,
12960                                                "count action not supported");
12961                         }
12962                         /* Save information first, will apply later. */
12963                         count = action->conf;
12964                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12965                         break;
12966                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12967                         dev_flow->dv.actions[actions_n++] =
12968                                                 priv->sh->pop_vlan_action;
12969                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12970                         break;
12971                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12972                         if (!(action_flags &
12973                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12974                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12975                         vlan.eth_proto = rte_be_to_cpu_16
12976                              ((((const struct rte_flow_action_of_push_vlan *)
12977                                                    actions->conf)->ethertype));
12978                         found_action = mlx5_flow_find_action
12979                                         (actions + 1,
12980                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12981                         if (found_action)
12982                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12983                         found_action = mlx5_flow_find_action
12984                                         (actions + 1,
12985                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12986                         if (found_action)
12987                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12988                         if (flow_dv_create_action_push_vlan
12989                                             (dev, attr, &vlan, dev_flow, error))
12990                                 return -rte_errno;
12991                         dev_flow->dv.actions[actions_n++] =
12992                                         dev_flow->dv.push_vlan_res->action;
12993                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12994                         break;
12995                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12996                         /* of_vlan_push action handled this action */
12997                         MLX5_ASSERT(action_flags &
12998                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12999                         break;
13000                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
13001                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
13002                                 break;
13003                         flow_dev_get_vlan_info_from_items(items, &vlan);
13004                         mlx5_update_vlan_vid_pcp(actions, &vlan);
13005                         /* If no VLAN push - this is a modify header action */
13006                         if (flow_dv_convert_action_modify_vlan_vid
13007                                                 (mhdr_res, actions, error))
13008                                 return -rte_errno;
13009                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
13010                         break;
13011                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13012                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13013                         if (flow_dv_create_action_l2_encap(dev, actions,
13014                                                            dev_flow,
13015                                                            attr->transfer,
13016                                                            error))
13017                                 return -rte_errno;
13018                         dev_flow->dv.actions[actions_n++] =
13019                                         dev_flow->dv.encap_decap->action;
13020                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13021                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13022                                 sample_act->action_flags |=
13023                                                         MLX5_FLOW_ACTION_ENCAP;
13024                         break;
13025                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
13026                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
13027                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
13028                                                            attr->transfer,
13029                                                            error))
13030                                 return -rte_errno;
13031                         dev_flow->dv.actions[actions_n++] =
13032                                         dev_flow->dv.encap_decap->action;
13033                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13034                         break;
13035                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13036                         /* Handle encap with preceding decap. */
13037                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
13038                                 if (flow_dv_create_action_raw_encap
13039                                         (dev, actions, dev_flow, attr, error))
13040                                         return -rte_errno;
13041                                 dev_flow->dv.actions[actions_n++] =
13042                                         dev_flow->dv.encap_decap->action;
13043                         } else {
13044                                 /* Handle encap without preceding decap. */
13045                                 if (flow_dv_create_action_l2_encap
13046                                     (dev, actions, dev_flow, attr->transfer,
13047                                      error))
13048                                         return -rte_errno;
13049                                 dev_flow->dv.actions[actions_n++] =
13050                                         dev_flow->dv.encap_decap->action;
13051                         }
13052                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13053                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13054                                 sample_act->action_flags |=
13055                                                         MLX5_FLOW_ACTION_ENCAP;
13056                         break;
13057                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
13058                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
13059                                 ;
13060                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
13061                                 if (flow_dv_create_action_l2_decap
13062                                     (dev, dev_flow, attr->transfer, error))
13063                                         return -rte_errno;
13064                                 dev_flow->dv.actions[actions_n++] =
13065                                         dev_flow->dv.encap_decap->action;
13066                         }
13067                         /* If decap is followed by encap, handle it at encap. */
13068                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13069                         break;
13070                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
13071                         dev_flow->dv.actions[actions_n++] =
13072                                 (void *)(uintptr_t)action->conf;
13073                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13074                         break;
13075                 case RTE_FLOW_ACTION_TYPE_JUMP:
13076                         jump_group = ((const struct rte_flow_action_jump *)
13077                                                         action->conf)->group;
13078                         grp_info.std_tbl_fix = 0;
13079                         if (dev_flow->skip_scale &
13080                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
13081                                 grp_info.skip_scale = 1;
13082                         else
13083                                 grp_info.skip_scale = 0;
13084                         ret = mlx5_flow_group_to_table(dev, tunnel,
13085                                                        jump_group,
13086                                                        &table,
13087                                                        &grp_info, error);
13088                         if (ret)
13089                                 return ret;
13090                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
13091                                                        attr->transfer,
13092                                                        !!dev_flow->external,
13093                                                        tunnel, jump_group, 0,
13094                                                        0, error);
13095                         if (!tbl)
13096                                 return rte_flow_error_set
13097                                                 (error, errno,
13098                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13099                                                  NULL,
13100                                                  "cannot create jump action.");
13101                         if (flow_dv_jump_tbl_resource_register
13102                             (dev, tbl, dev_flow, error)) {
13103                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13104                                 return rte_flow_error_set
13105                                                 (error, errno,
13106                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13107                                                  NULL,
13108                                                  "cannot create jump action.");
13109                         }
13110                         dev_flow->dv.actions[actions_n++] =
13111                                         dev_flow->dv.jump->action;
13112                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13113                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
13114                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
13115                         num_of_dest++;
13116                         break;
13117                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
13118                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
13119                         if (flow_dv_convert_action_modify_mac
13120                                         (mhdr_res, actions, error))
13121                                 return -rte_errno;
13122                         action_flags |= actions->type ==
13123                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
13124                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
13125                                         MLX5_FLOW_ACTION_SET_MAC_DST;
13126                         break;
13127                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
13128                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
13129                         if (flow_dv_convert_action_modify_ipv4
13130                                         (mhdr_res, actions, error))
13131                                 return -rte_errno;
13132                         action_flags |= actions->type ==
13133                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
13134                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
13135                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
13136                         break;
13137                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
13138                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
13139                         if (flow_dv_convert_action_modify_ipv6
13140                                         (mhdr_res, actions, error))
13141                                 return -rte_errno;
13142                         action_flags |= actions->type ==
13143                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
13144                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
13145                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
13146                         break;
13147                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
13148                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
13149                         if (flow_dv_convert_action_modify_tp
13150                                         (mhdr_res, actions, items,
13151                                          &flow_attr, dev_flow, !!(action_flags &
13152                                          MLX5_FLOW_ACTION_DECAP), error))
13153                                 return -rte_errno;
13154                         action_flags |= actions->type ==
13155                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
13156                                         MLX5_FLOW_ACTION_SET_TP_SRC :
13157                                         MLX5_FLOW_ACTION_SET_TP_DST;
13158                         break;
13159                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13160                         if (flow_dv_convert_action_modify_dec_ttl
13161                                         (mhdr_res, items, &flow_attr, dev_flow,
13162                                          !!(action_flags &
13163                                          MLX5_FLOW_ACTION_DECAP), error))
13164                                 return -rte_errno;
13165                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13166                         break;
13167                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
13168                         if (flow_dv_convert_action_modify_ttl
13169                                         (mhdr_res, actions, items, &flow_attr,
13170                                          dev_flow, !!(action_flags &
13171                                          MLX5_FLOW_ACTION_DECAP), error))
13172                                 return -rte_errno;
13173                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13174                         break;
13175                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13176                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13177                         if (flow_dv_convert_action_modify_tcp_seq
13178                                         (mhdr_res, actions, error))
13179                                 return -rte_errno;
13180                         action_flags |= actions->type ==
13181                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13182                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
13183                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13184                         break;
13185
13186                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13187                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13188                         if (flow_dv_convert_action_modify_tcp_ack
13189                                         (mhdr_res, actions, error))
13190                                 return -rte_errno;
13191                         action_flags |= actions->type ==
13192                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13193                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
13194                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
13195                         break;
13196                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13197                         if (flow_dv_convert_action_set_reg
13198                                         (mhdr_res, actions, error))
13199                                 return -rte_errno;
13200                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13201                         break;
13202                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13203                         if (flow_dv_convert_action_copy_mreg
13204                                         (dev, mhdr_res, actions, error))
13205                                 return -rte_errno;
13206                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13207                         break;
13208                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13209                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13210                         dev_flow->handle->fate_action =
13211                                         MLX5_FLOW_FATE_DEFAULT_MISS;
13212                         break;
13213                 case RTE_FLOW_ACTION_TYPE_METER:
13214                         if (!wks->fm)
13215                                 return rte_flow_error_set(error, rte_errno,
13216                                         RTE_FLOW_ERROR_TYPE_ACTION,
13217                                         NULL, "Failed to get meter in flow.");
13218                         /* Set the meter action. */
13219                         dev_flow->dv.actions[actions_n++] =
13220                                 wks->fm->meter_action;
13221                         action_flags |= MLX5_FLOW_ACTION_METER;
13222                         break;
13223                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13224                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13225                                                               actions, error))
13226                                 return -rte_errno;
13227                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13228                         break;
13229                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13230                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13231                                                               actions, error))
13232                                 return -rte_errno;
13233                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13234                         break;
13235                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13236                         sample_act_pos = actions_n;
13237                         sample = (const struct rte_flow_action_sample *)
13238                                  action->conf;
13239                         actions_n++;
13240                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13241                         /* put encap action into group if work with port id */
13242                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13243                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13244                                 sample_act->action_flags |=
13245                                                         MLX5_FLOW_ACTION_ENCAP;
13246                         break;
13247                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13248                         if (flow_dv_convert_action_modify_field
13249                                         (dev, mhdr_res, actions, attr, error))
13250                                 return -rte_errno;
13251                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13252                         break;
13253                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13254                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13255                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13256                         if (!ct)
13257                                 return rte_flow_error_set(error, EINVAL,
13258                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13259                                                 NULL,
13260                                                 "Failed to get CT object.");
13261                         if (mlx5_aso_ct_available(priv->sh, ct))
13262                                 return rte_flow_error_set(error, rte_errno,
13263                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13264                                                 NULL,
13265                                                 "CT is unavailable.");
13266                         if (ct->is_original)
13267                                 dev_flow->dv.actions[actions_n] =
13268                                                         ct->dr_action_orig;
13269                         else
13270                                 dev_flow->dv.actions[actions_n] =
13271                                                         ct->dr_action_rply;
13272                         if (flow->ct == 0) {
13273                                 flow->indirect_type =
13274                                                 MLX5_INDIRECT_ACTION_TYPE_CT;
13275                                 flow->ct = owner_idx;
13276                                 __atomic_fetch_add(&ct->refcnt, 1,
13277                                                    __ATOMIC_RELAXED);
13278                         }
13279                         actions_n++;
13280                         action_flags |= MLX5_FLOW_ACTION_CT;
13281                         break;
13282                 case RTE_FLOW_ACTION_TYPE_END:
13283                         actions_end = true;
13284                         if (mhdr_res->actions_num) {
13285                                 /* create modify action if needed. */
13286                                 if (flow_dv_modify_hdr_resource_register
13287                                         (dev, mhdr_res, dev_flow, error))
13288                                         return -rte_errno;
13289                                 dev_flow->dv.actions[modify_action_position] =
13290                                         handle->dvh.modify_hdr->action;
13291                         }
13292                         /*
13293                          * Handle AGE and COUNT action by single HW counter
13294                          * when they are not shared.
13295                          */
13296                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
13297                                 if ((non_shared_age && count) ||
13298                                     !(priv->sh->flow_hit_aso_en &&
13299                                       (attr->group || attr->transfer))) {
13300                                         /* Creates age by counters. */
13301                                         cnt_act = flow_dv_prepare_counter
13302                                                                 (dev, dev_flow,
13303                                                                  flow, count,
13304                                                                  non_shared_age,
13305                                                                  error);
13306                                         if (!cnt_act)
13307                                                 return -rte_errno;
13308                                         dev_flow->dv.actions[age_act_pos] =
13309                                                                 cnt_act->action;
13310                                         break;
13311                                 }
13312                                 if (!flow->age && non_shared_age) {
13313                                         flow->age = flow_dv_aso_age_alloc
13314                                                                 (dev, error);
13315                                         if (!flow->age)
13316                                                 return -rte_errno;
13317                                         flow_dv_aso_age_params_init
13318                                                     (dev, flow->age,
13319                                                      non_shared_age->context ?
13320                                                      non_shared_age->context :
13321                                                      (void *)(uintptr_t)
13322                                                      (dev_flow->flow_idx),
13323                                                      non_shared_age->timeout);
13324                                 }
13325                                 age_act = flow_aso_age_get_by_idx(dev,
13326                                                                   flow->age);
13327                                 dev_flow->dv.actions[age_act_pos] =
13328                                                              age_act->dr_action;
13329                         }
13330                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13331                                 /*
13332                                  * Create one count action, to be used
13333                                  * by all sub-flows.
13334                                  */
13335                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13336                                                                   flow, count,
13337                                                                   NULL, error);
13338                                 if (!cnt_act)
13339                                         return -rte_errno;
13340                                 dev_flow->dv.actions[actions_n++] =
13341                                                                 cnt_act->action;
13342                         }
13343                 default:
13344                         break;
13345                 }
13346                 if (mhdr_res->actions_num &&
13347                     modify_action_position == UINT32_MAX)
13348                         modify_action_position = actions_n++;
13349         }
13350         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13351                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13352                 int item_type = items->type;
13353
13354                 if (!mlx5_flow_os_item_supported(item_type))
13355                         return rte_flow_error_set(error, ENOTSUP,
13356                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13357                                                   NULL, "item not supported");
13358                 switch (item_type) {
13359                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13360                         flow_dv_translate_item_port_id
13361                                 (dev, match_mask, match_value, items, attr);
13362                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13363                         break;
13364                 case RTE_FLOW_ITEM_TYPE_ETH:
13365                         flow_dv_translate_item_eth(match_mask, match_value,
13366                                                    items, tunnel,
13367                                                    dev_flow->dv.group);
13368                         matcher.priority = action_flags &
13369                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13370                                         !dev_flow->external ?
13371                                         MLX5_PRIORITY_MAP_L3 :
13372                                         MLX5_PRIORITY_MAP_L2;
13373                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13374                                              MLX5_FLOW_LAYER_OUTER_L2;
13375                         break;
13376                 case RTE_FLOW_ITEM_TYPE_VLAN:
13377                         flow_dv_translate_item_vlan(dev_flow,
13378                                                     match_mask, match_value,
13379                                                     items, tunnel,
13380                                                     dev_flow->dv.group);
13381                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13382                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13383                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13384                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13385                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13386                         break;
13387                 case RTE_FLOW_ITEM_TYPE_IPV4:
13388                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13389                                                   &item_flags, &tunnel);
13390                         flow_dv_translate_item_ipv4(match_mask, match_value,
13391                                                     items, tunnel,
13392                                                     dev_flow->dv.group);
13393                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13394                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13395                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13396                         if (items->mask != NULL &&
13397                             ((const struct rte_flow_item_ipv4 *)
13398                              items->mask)->hdr.next_proto_id) {
13399                                 next_protocol =
13400                                         ((const struct rte_flow_item_ipv4 *)
13401                                          (items->spec))->hdr.next_proto_id;
13402                                 next_protocol &=
13403                                         ((const struct rte_flow_item_ipv4 *)
13404                                          (items->mask))->hdr.next_proto_id;
13405                         } else {
13406                                 /* Reset for inner layer. */
13407                                 next_protocol = 0xff;
13408                         }
13409                         break;
13410                 case RTE_FLOW_ITEM_TYPE_IPV6:
13411                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13412                                                   &item_flags, &tunnel);
13413                         flow_dv_translate_item_ipv6(match_mask, match_value,
13414                                                     items, tunnel,
13415                                                     dev_flow->dv.group);
13416                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13417                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13418                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13419                         if (items->mask != NULL &&
13420                             ((const struct rte_flow_item_ipv6 *)
13421                              items->mask)->hdr.proto) {
13422                                 next_protocol =
13423                                         ((const struct rte_flow_item_ipv6 *)
13424                                          items->spec)->hdr.proto;
13425                                 next_protocol &=
13426                                         ((const struct rte_flow_item_ipv6 *)
13427                                          items->mask)->hdr.proto;
13428                         } else {
13429                                 /* Reset for inner layer. */
13430                                 next_protocol = 0xff;
13431                         }
13432                         break;
13433                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13434                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13435                                                              match_value,
13436                                                              items, tunnel);
13437                         last_item = tunnel ?
13438                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13439                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13440                         if (items->mask != NULL &&
13441                             ((const struct rte_flow_item_ipv6_frag_ext *)
13442                              items->mask)->hdr.next_header) {
13443                                 next_protocol =
13444                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13445                                  items->spec)->hdr.next_header;
13446                                 next_protocol &=
13447                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13448                                  items->mask)->hdr.next_header;
13449                         } else {
13450                                 /* Reset for inner layer. */
13451                                 next_protocol = 0xff;
13452                         }
13453                         break;
13454                 case RTE_FLOW_ITEM_TYPE_TCP:
13455                         flow_dv_translate_item_tcp(match_mask, match_value,
13456                                                    items, tunnel);
13457                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13458                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13459                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13460                         break;
13461                 case RTE_FLOW_ITEM_TYPE_UDP:
13462                         flow_dv_translate_item_udp(match_mask, match_value,
13463                                                    items, tunnel);
13464                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13465                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13466                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13467                         break;
13468                 case RTE_FLOW_ITEM_TYPE_GRE:
13469                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13470                         last_item = MLX5_FLOW_LAYER_GRE;
13471                         tunnel_item = items;
13472                         break;
13473                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13474                         flow_dv_translate_item_gre_key(match_mask,
13475                                                        match_value, items);
13476                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13477                         break;
13478                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13479                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13480                         last_item = MLX5_FLOW_LAYER_GRE;
13481                         tunnel_item = items;
13482                         break;
13483                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13484                         flow_dv_translate_item_vxlan(dev, attr,
13485                                                      match_mask, match_value,
13486                                                      items, tunnel);
13487                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13488                         last_item = MLX5_FLOW_LAYER_VXLAN;
13489                         break;
13490                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13491                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13492                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13493                         tunnel_item = items;
13494                         break;
13495                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13496                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13497                         last_item = MLX5_FLOW_LAYER_GENEVE;
13498                         tunnel_item = items;
13499                         break;
13500                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13501                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13502                                                           match_value,
13503                                                           items, error);
13504                         if (ret)
13505                                 return rte_flow_error_set(error, -ret,
13506                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13507                                         "cannot create GENEVE TLV option");
13508                         flow->geneve_tlv_option = 1;
13509                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13510                         break;
13511                 case RTE_FLOW_ITEM_TYPE_MPLS:
13512                         flow_dv_translate_item_mpls(match_mask, match_value,
13513                                                     items, last_item, tunnel);
13514                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13515                         last_item = MLX5_FLOW_LAYER_MPLS;
13516                         break;
13517                 case RTE_FLOW_ITEM_TYPE_MARK:
13518                         flow_dv_translate_item_mark(dev, match_mask,
13519                                                     match_value, items);
13520                         last_item = MLX5_FLOW_ITEM_MARK;
13521                         break;
13522                 case RTE_FLOW_ITEM_TYPE_META:
13523                         flow_dv_translate_item_meta(dev, match_mask,
13524                                                     match_value, attr, items);
13525                         last_item = MLX5_FLOW_ITEM_METADATA;
13526                         break;
13527                 case RTE_FLOW_ITEM_TYPE_ICMP:
13528                         flow_dv_translate_item_icmp(match_mask, match_value,
13529                                                     items, tunnel);
13530                         last_item = MLX5_FLOW_LAYER_ICMP;
13531                         break;
13532                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13533                         flow_dv_translate_item_icmp6(match_mask, match_value,
13534                                                       items, tunnel);
13535                         last_item = MLX5_FLOW_LAYER_ICMP6;
13536                         break;
13537                 case RTE_FLOW_ITEM_TYPE_TAG:
13538                         flow_dv_translate_item_tag(dev, match_mask,
13539                                                    match_value, items);
13540                         last_item = MLX5_FLOW_ITEM_TAG;
13541                         break;
13542                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13543                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13544                                                         match_value, items);
13545                         last_item = MLX5_FLOW_ITEM_TAG;
13546                         break;
13547                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13548                         flow_dv_translate_item_tx_queue(dev, match_mask,
13549                                                         match_value,
13550                                                         items);
13551                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13552                         break;
13553                 case RTE_FLOW_ITEM_TYPE_GTP:
13554                         flow_dv_translate_item_gtp(match_mask, match_value,
13555                                                    items, tunnel);
13556                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13557                         last_item = MLX5_FLOW_LAYER_GTP;
13558                         break;
13559                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13560                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13561                                                           match_value,
13562                                                           items);
13563                         if (ret)
13564                                 return rte_flow_error_set(error, -ret,
13565                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13566                                         "cannot create GTP PSC item");
13567                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13568                         break;
13569                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13570                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13571                                 /* Create it only the first time to be used. */
13572                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13573                                 if (ret)
13574                                         return rte_flow_error_set
13575                                                 (error, -ret,
13576                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13577                                                 NULL,
13578                                                 "cannot create eCPRI parser");
13579                         }
13580                         flow_dv_translate_item_ecpri(dev, match_mask,
13581                                                      match_value, items,
13582                                                      last_item);
13583                         /* No other protocol should follow eCPRI layer. */
13584                         last_item = MLX5_FLOW_LAYER_ECPRI;
13585                         break;
13586                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13587                         flow_dv_translate_item_integrity(items, integrity_items,
13588                                                          &last_item);
13589                         break;
13590                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13591                         flow_dv_translate_item_aso_ct(dev, match_mask,
13592                                                       match_value, items);
13593                         break;
13594                 case RTE_FLOW_ITEM_TYPE_FLEX:
13595                         flow_dv_translate_item_flex(dev, match_mask,
13596                                                     match_value, items,
13597                                                     dev_flow, tunnel != 0);
13598                         last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
13599                                     MLX5_FLOW_ITEM_OUTER_FLEX;
13600                         break;
13601                 default:
13602                         break;
13603                 }
13604                 item_flags |= last_item;
13605         }
13606         /*
13607          * When E-Switch mode is enabled, we have two cases where we need to
13608          * set the source port manually.
13609          * The first one, is in case of Nic steering rule, and the second is
13610          * E-Switch rule where no port_id item was found. In both cases
13611          * the source port is set according the current port in use.
13612          */
13613         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13614             (priv->representor || priv->master)) {
13615                 if (flow_dv_translate_item_port_id(dev, match_mask,
13616                                                    match_value, NULL, attr))
13617                         return -rte_errno;
13618         }
13619         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
13620                 flow_dv_translate_item_integrity_post(match_mask, match_value,
13621                                                       integrity_items,
13622                                                       item_flags);
13623         }
13624         if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
13625                 flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
13626                                                  tunnel_item, item_flags);
13627         else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
13628                 flow_dv_translate_item_geneve(match_mask, match_value,
13629                                               tunnel_item, item_flags);
13630         else if (item_flags & MLX5_FLOW_LAYER_GRE) {
13631                 if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
13632                         flow_dv_translate_item_gre(match_mask, match_value,
13633                                                    tunnel_item, item_flags);
13634                 else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
13635                         flow_dv_translate_item_nvgre(match_mask, match_value,
13636                                                      tunnel_item, item_flags);
13637                 else
13638                         MLX5_ASSERT(false);
13639         }
13640 #ifdef RTE_LIBRTE_MLX5_DEBUG
13641         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13642                                               dev_flow->dv.value.buf));
13643 #endif
13644         /*
13645          * Layers may be already initialized from prefix flow if this dev_flow
13646          * is the suffix flow.
13647          */
13648         handle->layers |= item_flags;
13649         if (action_flags & MLX5_FLOW_ACTION_RSS)
13650                 flow_dv_hashfields_set(dev_flow, rss_desc);
13651         /* If has RSS action in the sample action, the Sample/Mirror resource
13652          * should be registered after the hash filed be update.
13653          */
13654         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13655                 ret = flow_dv_translate_action_sample(dev,
13656                                                       sample,
13657                                                       dev_flow, attr,
13658                                                       &num_of_dest,
13659                                                       sample_actions,
13660                                                       &sample_res,
13661                                                       error);
13662                 if (ret < 0)
13663                         return ret;
13664                 ret = flow_dv_create_action_sample(dev,
13665                                                    dev_flow,
13666                                                    num_of_dest,
13667                                                    &sample_res,
13668                                                    &mdest_res,
13669                                                    sample_actions,
13670                                                    action_flags,
13671                                                    error);
13672                 if (ret < 0)
13673                         return rte_flow_error_set
13674                                                 (error, rte_errno,
13675                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13676                                                 NULL,
13677                                                 "cannot create sample action");
13678                 if (num_of_dest > 1) {
13679                         dev_flow->dv.actions[sample_act_pos] =
13680                         dev_flow->dv.dest_array_res->action;
13681                 } else {
13682                         dev_flow->dv.actions[sample_act_pos] =
13683                         dev_flow->dv.sample_res->verbs_action;
13684                 }
13685         }
13686         /*
13687          * For multiple destination (sample action with ratio=1), the encap
13688          * action and port id action will be combined into group action.
13689          * So need remove the original these actions in the flow and only
13690          * use the sample action instead of.
13691          */
13692         if (num_of_dest > 1 &&
13693             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13694                 int i;
13695                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13696
13697                 for (i = 0; i < actions_n; i++) {
13698                         if ((sample_act->dr_encap_action &&
13699                                 sample_act->dr_encap_action ==
13700                                 dev_flow->dv.actions[i]) ||
13701                                 (sample_act->dr_port_id_action &&
13702                                 sample_act->dr_port_id_action ==
13703                                 dev_flow->dv.actions[i]) ||
13704                                 (sample_act->dr_jump_action &&
13705                                 sample_act->dr_jump_action ==
13706                                 dev_flow->dv.actions[i]))
13707                                 continue;
13708                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13709                 }
13710                 memcpy((void *)dev_flow->dv.actions,
13711                                 (void *)temp_actions,
13712                                 tmp_actions_n * sizeof(void *));
13713                 actions_n = tmp_actions_n;
13714         }
13715         dev_flow->dv.actions_n = actions_n;
13716         dev_flow->act_flags = action_flags;
13717         if (wks->skip_matcher_reg)
13718                 return 0;
13719         /* Register matcher. */
13720         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13721                                     matcher.mask.size);
13722         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13723                                                      matcher.priority,
13724                                                      dev_flow->external);
13725         /**
13726          * When creating meter drop flow in drop table, using original
13727          * 5-tuple match, the matcher priority should be lower than
13728          * mtr_id matcher.
13729          */
13730         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13731             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13732             matcher.priority <= MLX5_REG_BITS)
13733                 matcher.priority += MLX5_REG_BITS;
13734         /* reserved field no needs to be set to 0 here. */
13735         tbl_key.is_fdb = attr->transfer;
13736         tbl_key.is_egress = attr->egress;
13737         tbl_key.level = dev_flow->dv.group;
13738         tbl_key.id = dev_flow->dv.table_id;
13739         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13740                                      tunnel, attr->group, error))
13741                 return -rte_errno;
13742         return 0;
13743 }
13744
13745 /**
13746  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13747  * and tunnel.
13748  *
13749  * @param[in, out] action
13750  *   Shred RSS action holding hash RX queue objects.
13751  * @param[in] hash_fields
13752  *   Defines combination of packet fields to participate in RX hash.
13753  * @param[in] tunnel
13754  *   Tunnel type
13755  * @param[in] hrxq_idx
13756  *   Hash RX queue index to set.
13757  *
13758  * @return
13759  *   0 on success, otherwise negative errno value.
13760  */
13761 static int
13762 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13763                               const uint64_t hash_fields,
13764                               uint32_t hrxq_idx)
13765 {
13766         uint32_t *hrxqs = action->hrxq;
13767
13768         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13769         case MLX5_RSS_HASH_IPV4:
13770                 /* fall-through. */
13771         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13772                 /* fall-through. */
13773         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13774                 hrxqs[0] = hrxq_idx;
13775                 return 0;
13776         case MLX5_RSS_HASH_IPV4_TCP:
13777                 /* fall-through. */
13778         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13779                 /* fall-through. */
13780         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13781                 hrxqs[1] = hrxq_idx;
13782                 return 0;
13783         case MLX5_RSS_HASH_IPV4_UDP:
13784                 /* fall-through. */
13785         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13786                 /* fall-through. */
13787         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13788                 hrxqs[2] = hrxq_idx;
13789                 return 0;
13790         case MLX5_RSS_HASH_IPV6:
13791                 /* fall-through. */
13792         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13793                 /* fall-through. */
13794         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13795                 hrxqs[3] = hrxq_idx;
13796                 return 0;
13797         case MLX5_RSS_HASH_IPV6_TCP:
13798                 /* fall-through. */
13799         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13800                 /* fall-through. */
13801         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13802                 hrxqs[4] = hrxq_idx;
13803                 return 0;
13804         case MLX5_RSS_HASH_IPV6_UDP:
13805                 /* fall-through. */
13806         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13807                 /* fall-through. */
13808         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13809                 hrxqs[5] = hrxq_idx;
13810                 return 0;
13811         case MLX5_RSS_HASH_NONE:
13812                 hrxqs[6] = hrxq_idx;
13813                 return 0;
13814         default:
13815                 return -1;
13816         }
13817 }
13818
13819 /**
13820  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13821  * and tunnel.
13822  *
13823  * @param[in] dev
13824  *   Pointer to the Ethernet device structure.
13825  * @param[in] idx
13826  *   Shared RSS action ID holding hash RX queue objects.
13827  * @param[in] hash_fields
13828  *   Defines combination of packet fields to participate in RX hash.
13829  * @param[in] tunnel
13830  *   Tunnel type
13831  *
13832  * @return
13833  *   Valid hash RX queue index, otherwise 0.
13834  */
13835 static uint32_t
13836 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13837                                  const uint64_t hash_fields)
13838 {
13839         struct mlx5_priv *priv = dev->data->dev_private;
13840         struct mlx5_shared_action_rss *shared_rss =
13841             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13842         const uint32_t *hrxqs = shared_rss->hrxq;
13843
13844         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13845         case MLX5_RSS_HASH_IPV4:
13846                 /* fall-through. */
13847         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13848                 /* fall-through. */
13849         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13850                 return hrxqs[0];
13851         case MLX5_RSS_HASH_IPV4_TCP:
13852                 /* fall-through. */
13853         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13854                 /* fall-through. */
13855         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13856                 return hrxqs[1];
13857         case MLX5_RSS_HASH_IPV4_UDP:
13858                 /* fall-through. */
13859         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13860                 /* fall-through. */
13861         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13862                 return hrxqs[2];
13863         case MLX5_RSS_HASH_IPV6:
13864                 /* fall-through. */
13865         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13866                 /* fall-through. */
13867         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13868                 return hrxqs[3];
13869         case MLX5_RSS_HASH_IPV6_TCP:
13870                 /* fall-through. */
13871         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13872                 /* fall-through. */
13873         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13874                 return hrxqs[4];
13875         case MLX5_RSS_HASH_IPV6_UDP:
13876                 /* fall-through. */
13877         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13878                 /* fall-through. */
13879         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13880                 return hrxqs[5];
13881         case MLX5_RSS_HASH_NONE:
13882                 return hrxqs[6];
13883         default:
13884                 return 0;
13885         }
13886
13887 }
13888
13889 /**
13890  * Apply the flow to the NIC, lock free,
13891  * (mutex should be acquired by caller).
13892  *
13893  * @param[in] dev
13894  *   Pointer to the Ethernet device structure.
13895  * @param[in, out] flow
13896  *   Pointer to flow structure.
13897  * @param[out] error
13898  *   Pointer to error structure.
13899  *
13900  * @return
13901  *   0 on success, a negative errno value otherwise and rte_errno is set.
13902  */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
              struct rte_flow_error *error)
{
        struct mlx5_flow_dv_workspace *dv;
        struct mlx5_flow_handle *dh;
        struct mlx5_flow_handle_dv *dv_h;
        struct mlx5_flow *dev_flow;
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        int n;
        int err;
        int idx;
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
        struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
        uint8_t misc_mask;

        MLX5_ASSERT(wks);
        /*
         * Walk every device flow accumulated in the per-thread workspace,
         * from the last created backwards, and install each one in HW.
         */
        for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
                dev_flow = &wks->flows[idx];
                dv = &dev_flow->dv;
                dh = dev_flow->handle;
                dv_h = &dh->dvh;
                n = dv->actions_n;
                /*
                 * Append the fate action (drop/queue/shared-RSS/default-miss)
                 * to the action array built during translation.
                 */
                if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
                        if (dv->transfer) {
                                MLX5_ASSERT(priv->sh->dr_drop_action);
                                dv->actions[n++] = priv->sh->dr_drop_action;
                        } else {
#ifdef HAVE_MLX5DV_DR
                                /* DR supports drop action placeholder. */
                                MLX5_ASSERT(priv->sh->dr_drop_action);
                                dv->actions[n++] = dv->group ?
                                        priv->sh->dr_drop_action :
                                        priv->root_drop_action;
#else
                                /* For DV we use the explicit drop queue. */
                                MLX5_ASSERT(priv->drop_queue.hrxq);
                                dv->actions[n++] =
                                                priv->drop_queue.hrxq->action;
#endif
                        }
                } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
                           !dv_h->rix_sample && !dv_h->rix_dest_array)) {
                        /*
                         * Plain queue/RSS fate: sample and dest-array flows
                         * carry their queue inside the sample resource, so
                         * they are excluded here.
                         */
                        struct mlx5_hrxq *hrxq;
                        uint32_t hrxq_idx;

                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
                                                    &hrxq_idx);
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get hash queue");
                                goto error;
                        }
                        dh->rix_hrxq = hrxq_idx;
                        dv->actions[n++] = hrxq->action;
                } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
                        struct mlx5_hrxq *hrxq = NULL;
                        uint32_t hrxq_idx;

                        /* Resolve the hrxq from the shared RSS action. */
                        hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
                                                rss_desc->shared_rss,
                                                dev_flow->hash_fields);
                        if (hrxq_idx)
                                hrxq = mlx5_ipool_get
                                        (priv->sh->ipool[MLX5_IPOOL_HRXQ],
                                         hrxq_idx);
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get hash queue");
                                goto error;
                        }
                        dh->rix_srss = rss_desc->shared_rss;
                        dv->actions[n++] = hrxq->action;
                } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
                        if (!priv->sh->default_miss_action) {
                                rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "default miss action not be created.");
                                goto error;
                        }
                        dv->actions[n++] = priv->sh->default_miss_action;
                }
                /* Trim the match value buffer to the enabled misc fields. */
                misc_mask = flow_dv_matcher_enable(dv->value.buf);
                __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
                err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
                                               (void *)&dv->value, n,
                                               dv->actions, &dh->drv_flow);
                if (err) {
                        rte_flow_error_set
                                (error, errno,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                NULL,
                                (!priv->config.allow_duplicate_pattern &&
                                errno == EEXIST) ?
                                "duplicating pattern is not allowed" :
                                "hardware refuses to create flow");
                        goto error;
                }
                if (priv->vmwa_context &&
                    dh->vf_vlan.tag && !dh->vf_vlan.created) {
                        /*
                         * The rule contains the VLAN pattern.
                         * For VF we are going to create VLAN
                         * interface to make hypervisor set correct
                         * e-Switch vport context.
                         */
                        mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
                }
        }
        return 0;
error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        /* Roll back resources taken by the handles applied so far. */
        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dh, next) {
                /* hrxq is union, don't clear it if the flag is not set. */
                if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
                        mlx5_hrxq_release(dev, dh->rix_hrxq);
                        dh->rix_hrxq = 0;
                } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
                        dh->rix_srss = 0;
                }
                if (dh->vf_vlan.tag && dh->vf_vlan.created)
                        mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
        }
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
}
14036
14037 void
14038 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
14039                           struct mlx5_list_entry *entry)
14040 {
14041         struct mlx5_flow_dv_matcher *resource = container_of(entry,
14042                                                              typeof(*resource),
14043                                                              entry);
14044
14045         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
14046         mlx5_free(resource);
14047 }
14048
14049 /**
14050  * Release the flow matcher.
14051  *
14052  * @param dev
14053  *   Pointer to Ethernet device.
14054  * @param port_id
14055  *   Index to port ID action resource.
14056  *
14057  * @return
14058  *   1 while a reference on it exists, 0 when freed.
14059  */
14060 static int
14061 flow_dv_matcher_release(struct rte_eth_dev *dev,
14062                         struct mlx5_flow_handle *handle)
14063 {
14064         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
14065         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
14066                                                             typeof(*tbl), tbl);
14067         int ret;
14068
14069         MLX5_ASSERT(matcher->matcher_object);
14070         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
14071         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
14072         return ret;
14073 }
14074
14075 void
14076 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14077 {
14078         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14079         struct mlx5_flow_dv_encap_decap_resource *res =
14080                                        container_of(entry, typeof(*res), entry);
14081
14082         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14083         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
14084 }
14085
14086 /**
14087  * Release an encap/decap resource.
14088  *
14089  * @param dev
14090  *   Pointer to Ethernet device.
14091  * @param encap_decap_idx
14092  *   Index of encap decap resource.
14093  *
14094  * @return
14095  *   1 while a reference on it exists, 0 when freed.
14096  */
14097 static int
14098 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
14099                                      uint32_t encap_decap_idx)
14100 {
14101         struct mlx5_priv *priv = dev->data->dev_private;
14102         struct mlx5_flow_dv_encap_decap_resource *resource;
14103
14104         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
14105                                   encap_decap_idx);
14106         if (!resource)
14107                 return 0;
14108         MLX5_ASSERT(resource->action);
14109         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
14110 }
14111
14112 /**
14113  * Release an jump to table action resource.
14114  *
14115  * @param dev
14116  *   Pointer to Ethernet device.
14117  * @param rix_jump
14118  *   Index to the jump action resource.
14119  *
14120  * @return
14121  *   1 while a reference on it exists, 0 when freed.
14122  */
14123 static int
14124 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
14125                                   uint32_t rix_jump)
14126 {
14127         struct mlx5_priv *priv = dev->data->dev_private;
14128         struct mlx5_flow_tbl_data_entry *tbl_data;
14129
14130         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
14131                                   rix_jump);
14132         if (!tbl_data)
14133                 return 0;
14134         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
14135 }
14136
14137 void
14138 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14139 {
14140         struct mlx5_flow_dv_modify_hdr_resource *res =
14141                 container_of(entry, typeof(*res), entry);
14142         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14143
14144         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14145         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
14146 }
14147
14148 /**
14149  * Release a modify-header resource.
14150  *
14151  * @param dev
14152  *   Pointer to Ethernet device.
14153  * @param handle
14154  *   Pointer to mlx5_flow_handle.
14155  *
14156  * @return
14157  *   1 while a reference on it exists, 0 when freed.
14158  */
14159 static int
14160 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
14161                                     struct mlx5_flow_handle *handle)
14162 {
14163         struct mlx5_priv *priv = dev->data->dev_private;
14164         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
14165
14166         MLX5_ASSERT(entry->action);
14167         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
14168 }
14169
14170 void
14171 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14172 {
14173         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14174         struct mlx5_flow_dv_port_id_action_resource *resource =
14175                                   container_of(entry, typeof(*resource), entry);
14176
14177         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14178         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14179 }
14180
14181 /**
14182  * Release port ID action resource.
14183  *
14184  * @param dev
14185  *   Pointer to Ethernet device.
14186  * @param handle
14187  *   Pointer to mlx5_flow_handle.
14188  *
14189  * @return
14190  *   1 while a reference on it exists, 0 when freed.
14191  */
14192 static int
14193 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14194                                         uint32_t port_id)
14195 {
14196         struct mlx5_priv *priv = dev->data->dev_private;
14197         struct mlx5_flow_dv_port_id_action_resource *resource;
14198
14199         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14200         if (!resource)
14201                 return 0;
14202         MLX5_ASSERT(resource->action);
14203         return mlx5_list_unregister(priv->sh->port_id_action_list,
14204                                     &resource->entry);
14205 }
14206
14207 /**
14208  * Release shared RSS action resource.
14209  *
14210  * @param dev
14211  *   Pointer to Ethernet device.
14212  * @param srss
14213  *   Shared RSS action index.
14214  */
14215 static void
14216 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14217 {
14218         struct mlx5_priv *priv = dev->data->dev_private;
14219         struct mlx5_shared_action_rss *shared_rss;
14220
14221         shared_rss = mlx5_ipool_get
14222                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14223         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14224 }
14225
14226 void
14227 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14228 {
14229         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14230         struct mlx5_flow_dv_push_vlan_action_resource *resource =
14231                         container_of(entry, typeof(*resource), entry);
14232
14233         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14234         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14235 }
14236
14237 /**
14238  * Release push vlan action resource.
14239  *
14240  * @param dev
14241  *   Pointer to Ethernet device.
14242  * @param handle
14243  *   Pointer to mlx5_flow_handle.
14244  *
14245  * @return
14246  *   1 while a reference on it exists, 0 when freed.
14247  */
14248 static int
14249 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14250                                           struct mlx5_flow_handle *handle)
14251 {
14252         struct mlx5_priv *priv = dev->data->dev_private;
14253         struct mlx5_flow_dv_push_vlan_action_resource *resource;
14254         uint32_t idx = handle->dvh.rix_push_vlan;
14255
14256         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14257         if (!resource)
14258                 return 0;
14259         MLX5_ASSERT(resource->action);
14260         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14261                                     &resource->entry);
14262 }
14263
14264 /**
14265  * Release the fate resource.
14266  *
14267  * @param dev
14268  *   Pointer to Ethernet device.
14269  * @param handle
14270  *   Pointer to mlx5_flow_handle.
14271  */
14272 static void
14273 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14274                                struct mlx5_flow_handle *handle)
14275 {
14276         if (!handle->rix_fate)
14277                 return;
14278         switch (handle->fate_action) {
14279         case MLX5_FLOW_FATE_QUEUE:
14280                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14281                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14282                 break;
14283         case MLX5_FLOW_FATE_JUMP:
14284                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14285                 break;
14286         case MLX5_FLOW_FATE_PORT_ID:
14287                 flow_dv_port_id_action_resource_release(dev,
14288                                 handle->rix_port_id_action);
14289                 break;
14290         default:
14291                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14292                 break;
14293         }
14294         handle->rix_fate = 0;
14295 }
14296
14297 void
14298 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14299                          struct mlx5_list_entry *entry)
14300 {
14301         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14302                                                               typeof(*resource),
14303                                                               entry);
14304         struct rte_eth_dev *dev = resource->dev;
14305         struct mlx5_priv *priv = dev->data->dev_private;
14306
14307         if (resource->verbs_action)
14308                 claim_zero(mlx5_flow_os_destroy_flow_action
14309                                                       (resource->verbs_action));
14310         if (resource->normal_path_tbl)
14311                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14312                                              resource->normal_path_tbl);
14313         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14314         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14315         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14316 }
14317
14318 /**
14319  * Release an sample resource.
14320  *
14321  * @param dev
14322  *   Pointer to Ethernet device.
14323  * @param handle
14324  *   Pointer to mlx5_flow_handle.
14325  *
14326  * @return
14327  *   1 while a reference on it exists, 0 when freed.
14328  */
14329 static int
14330 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14331                                      struct mlx5_flow_handle *handle)
14332 {
14333         struct mlx5_priv *priv = dev->data->dev_private;
14334         struct mlx5_flow_dv_sample_resource *resource;
14335
14336         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14337                                   handle->dvh.rix_sample);
14338         if (!resource)
14339                 return 0;
14340         MLX5_ASSERT(resource->verbs_action);
14341         return mlx5_list_unregister(priv->sh->sample_action_list,
14342                                     &resource->entry);
14343 }
14344
14345 void
14346 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14347                              struct mlx5_list_entry *entry)
14348 {
14349         struct mlx5_flow_dv_dest_array_resource *resource =
14350                         container_of(entry, typeof(*resource), entry);
14351         struct rte_eth_dev *dev = resource->dev;
14352         struct mlx5_priv *priv = dev->data->dev_private;
14353         uint32_t i = 0;
14354
14355         MLX5_ASSERT(resource->action);
14356         if (resource->action)
14357                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14358         for (; i < resource->num_of_dest; i++)
14359                 flow_dv_sample_sub_actions_release(dev,
14360                                                    &resource->sample_idx[i]);
14361         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14362         DRV_LOG(DEBUG, "destination array resource %p: removed",
14363                 (void *)resource);
14364 }
14365
14366 /**
14367  * Release an destination array resource.
14368  *
14369  * @param dev
14370  *   Pointer to Ethernet device.
14371  * @param handle
14372  *   Pointer to mlx5_flow_handle.
14373  *
14374  * @return
14375  *   1 while a reference on it exists, 0 when freed.
14376  */
14377 static int
14378 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14379                                     struct mlx5_flow_handle *handle)
14380 {
14381         struct mlx5_priv *priv = dev->data->dev_private;
14382         struct mlx5_flow_dv_dest_array_resource *resource;
14383
14384         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14385                                   handle->dvh.rix_dest_array);
14386         if (!resource)
14387                 return 0;
14388         MLX5_ASSERT(resource->action);
14389         return mlx5_list_unregister(priv->sh->dest_array_list,
14390                                     &resource->entry);
14391 }
14392
/**
 * Release one reference on the shared GENEVE TLV option resource.
 *
 * The resource is shared per device context (sh); the reference counter
 * is decremented under the dedicated spinlock and, when it reaches zero,
 * the DevX object is destroyed and the shared pointer is cleared.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
				sh->geneve_tlv_option_resource;
	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
	if (geneve_opt_resource) {
		/* Last reference dropped - destroy the DevX object. */
		if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
					 __ATOMIC_RELAXED))) {
			claim_zero(mlx5_devx_cmd_destroy
					(geneve_opt_resource->obj));
			mlx5_free(sh->geneve_tlv_option_resource);
			sh->geneve_tlv_option_resource = NULL;
		}
	}
	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
}
14412
14413 /**
14414  * Remove the flow from the NIC but keeps it in memory.
14415  * Lock free, (mutex should be acquired by caller).
14416  *
14417  * @param[in] dev
14418  *   Pointer to Ethernet device.
14419  * @param[in, out] flow
14420  *   Pointer to flow structure.
14421  */
14422 static void
14423 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14424 {
14425         struct mlx5_flow_handle *dh;
14426         uint32_t handle_idx;
14427         struct mlx5_priv *priv = dev->data->dev_private;
14428
14429         if (!flow)
14430                 return;
14431         handle_idx = flow->dev_handles;
14432         while (handle_idx) {
14433                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14434                                     handle_idx);
14435                 if (!dh)
14436                         return;
14437                 if (dh->drv_flow) {
14438                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14439                         dh->drv_flow = NULL;
14440                 }
14441                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14442                         flow_dv_fate_resource_release(dev, dh);
14443                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14444                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14445                 handle_idx = dh->next.next;
14446         }
14447 }
14448
14449 /**
14450  * Remove the flow from the NIC and the memory.
14451  * Lock free, (mutex should be acquired by caller).
14452  *
14453  * @param[in] dev
14454  *   Pointer to the Ethernet device structure.
14455  * @param[in, out] flow
14456  *   Pointer to flow structure.
14457  */
14458 static void
14459 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14460 {
14461         struct mlx5_flow_handle *dev_handle;
14462         struct mlx5_priv *priv = dev->data->dev_private;
14463         struct mlx5_flow_meter_info *fm = NULL;
14464         uint32_t srss = 0;
14465
14466         if (!flow)
14467                 return;
14468         flow_dv_remove(dev, flow);
14469         if (flow->counter) {
14470                 flow_dv_counter_free(dev, flow->counter);
14471                 flow->counter = 0;
14472         }
14473         if (flow->meter) {
14474                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14475                 if (fm)
14476                         mlx5_flow_meter_detach(priv, fm);
14477                 flow->meter = 0;
14478         }
14479         /* Keep the current age handling by default. */
14480         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14481                 flow_dv_aso_ct_release(dev, flow->ct, NULL);
14482         else if (flow->age)
14483                 flow_dv_aso_age_release(dev, flow->age);
14484         if (flow->geneve_tlv_option) {
14485                 flow_dv_geneve_tlv_option_resource_release(dev);
14486                 flow->geneve_tlv_option = 0;
14487         }
14488         while (flow->dev_handles) {
14489                 uint32_t tmp_idx = flow->dev_handles;
14490
14491                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14492                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14493                 if (!dev_handle)
14494                         return;
14495                 flow->dev_handles = dev_handle->next.next;
14496                 while (dev_handle->flex_item) {
14497                         int index = rte_bsf32(dev_handle->flex_item);
14498
14499                         mlx5_flex_release_index(dev, index);
14500                         dev_handle->flex_item &= ~RTE_BIT32(index);
14501                 }
14502                 if (dev_handle->dvh.matcher)
14503                         flow_dv_matcher_release(dev, dev_handle);
14504                 if (dev_handle->dvh.rix_sample)
14505                         flow_dv_sample_resource_release(dev, dev_handle);
14506                 if (dev_handle->dvh.rix_dest_array)
14507                         flow_dv_dest_array_resource_release(dev, dev_handle);
14508                 if (dev_handle->dvh.rix_encap_decap)
14509                         flow_dv_encap_decap_resource_release(dev,
14510                                 dev_handle->dvh.rix_encap_decap);
14511                 if (dev_handle->dvh.modify_hdr)
14512                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14513                 if (dev_handle->dvh.rix_push_vlan)
14514                         flow_dv_push_vlan_action_resource_release(dev,
14515                                                                   dev_handle);
14516                 if (dev_handle->dvh.rix_tag)
14517                         flow_dv_tag_release(dev,
14518                                             dev_handle->dvh.rix_tag);
14519                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14520                         flow_dv_fate_resource_release(dev, dev_handle);
14521                 else if (!srss)
14522                         srss = dev_handle->rix_srss;
14523                 if (fm && dev_handle->is_meter_flow_id &&
14524                     dev_handle->split_flow_id)
14525                         mlx5_ipool_free(fm->flow_ipool,
14526                                         dev_handle->split_flow_id);
14527                 else if (dev_handle->split_flow_id &&
14528                     !dev_handle->is_meter_flow_id)
14529                         mlx5_ipool_free(priv->sh->ipool
14530                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14531                                         dev_handle->split_flow_id);
14532                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14533                            tmp_idx);
14534         }
14535         if (srss)
14536                 flow_dv_shared_rss_action_release(dev, srss);
14537 }
14538
14539 /**
14540  * Release array of hash RX queue objects.
14541  * Helper function.
14542  *
14543  * @param[in] dev
14544  *   Pointer to the Ethernet device structure.
14545  * @param[in, out] hrxqs
14546  *   Array of hash RX queue objects.
14547  *
14548  * @return
14549  *   Total number of references to hash RX queue objects in *hrxqs* array
14550  *   after this operation.
14551  */
14552 static int
14553 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14554                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14555 {
14556         size_t i;
14557         int remaining = 0;
14558
14559         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14560                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14561
14562                 if (!ret)
14563                         (*hrxqs)[i] = 0;
14564                 remaining += ret;
14565         }
14566         return remaining;
14567 }
14568
14569 /**
14570  * Release all hash RX queue objects representing shared RSS action.
14571  *
14572  * @param[in] dev
14573  *   Pointer to the Ethernet device structure.
14574  * @param[in, out] action
14575  *   Shared RSS action to remove hash RX queue objects from.
14576  *
14577  * @return
14578  *   Total number of references to hash RX queue objects stored in *action*
14579  *   after this operation.
14580  *   Expected to be 0 if no external references held.
14581  */
14582 static int
14583 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14584                                  struct mlx5_shared_action_rss *shared_rss)
14585 {
14586         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14587 }
14588
14589 /**
14590  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
14591  * user input.
14592  *
14593  * Only one hash value is available for one L3+L4 combination:
14594  * for example:
14595  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14596  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
14597  * same slot in mlx5_rss_hash_fields.
14598  *
14599  * @param[in] rss
14600  *   Pointer to the shared action RSS conf.
14601  * @param[in, out] hash_field
14602  *   hash_field variable needed to be adjusted.
14603  *
14604  * @return
14605  *   void
14606  */
14607 static void
14608 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14609                                      uint64_t *hash_field)
14610 {
14611         uint64_t rss_types = rss->origin.types;
14612
14613         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14614         case MLX5_RSS_HASH_IPV4:
14615                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14616                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14617                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14618                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14619                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14620                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14621                         else
14622                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14623                 }
14624                 return;
14625         case MLX5_RSS_HASH_IPV6:
14626                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14627                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14628                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14629                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14630                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14631                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14632                         else
14633                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14634                 }
14635                 return;
14636         case MLX5_RSS_HASH_IPV4_UDP:
14637                 /* fall-through. */
14638         case MLX5_RSS_HASH_IPV6_UDP:
14639                 if (rss_types & RTE_ETH_RSS_UDP) {
14640                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14641                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14642                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14643                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14644                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14645                         else
14646                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14647                 }
14648                 return;
14649         case MLX5_RSS_HASH_IPV4_TCP:
14650                 /* fall-through. */
14651         case MLX5_RSS_HASH_IPV6_TCP:
14652                 if (rss_types & RTE_ETH_RSS_TCP) {
14653                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14654                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14655                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14656                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14657                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14658                         else
14659                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14660                 }
14661                 return;
14662         default:
14663                 return;
14664         }
14665 }
14666
14667 /**
14668  * Setup shared RSS action.
14669  * Prepare set of hash RX queue objects sufficient to handle all valid
14670  * hash_fields combinations (see enum ibv_rx_hash_fields).
14671  *
14672  * @param[in] dev
14673  *   Pointer to the Ethernet device structure.
14674  * @param[in] action_idx
14675  *   Shared RSS action ipool index.
14676  * @param[in, out] action
14677  *   Partially initialized shared RSS action.
14678  * @param[out] error
14679  *   Perform verbose error reporting if not NULL. Initialized in case of
14680  *   error only.
14681  *
14682  * @return
14683  *   0 on success, otherwise negative errno value.
14684  */
14685 static int
14686 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14687                            uint32_t action_idx,
14688                            struct mlx5_shared_action_rss *shared_rss,
14689                            struct rte_flow_error *error)
14690 {
14691         struct mlx5_flow_rss_desc rss_desc = { 0 };
14692         size_t i;
14693         int err;
14694
14695         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
14696                                      !!dev->data->dev_started)) {
14697                 return rte_flow_error_set(error, rte_errno,
14698                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14699                                           "cannot setup indirection table");
14700         }
14701         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14702         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14703         rss_desc.const_q = shared_rss->origin.queue;
14704         rss_desc.queue_num = shared_rss->origin.queue_num;
14705         /* Set non-zero value to indicate a shared RSS. */
14706         rss_desc.shared_rss = action_idx;
14707         rss_desc.ind_tbl = shared_rss->ind_tbl;
14708         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14709                 uint32_t hrxq_idx;
14710                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14711                 int tunnel = 0;
14712
14713                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14714                 if (shared_rss->origin.level > 1) {
14715                         hash_fields |= IBV_RX_HASH_INNER;
14716                         tunnel = 1;
14717                 }
14718                 rss_desc.tunnel = tunnel;
14719                 rss_desc.hash_fields = hash_fields;
14720                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14721                 if (!hrxq_idx) {
14722                         rte_flow_error_set
14723                                 (error, rte_errno,
14724                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14725                                  "cannot get hash queue");
14726                         goto error_hrxq_new;
14727                 }
14728                 err = __flow_dv_action_rss_hrxq_set
14729                         (shared_rss, hash_fields, hrxq_idx);
14730                 MLX5_ASSERT(!err);
14731         }
14732         return 0;
14733 error_hrxq_new:
14734         err = rte_errno;
14735         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14736         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
14737                 shared_rss->ind_tbl = NULL;
14738         rte_errno = err;
14739         return -rte_errno;
14740 }
14741
14742 /**
14743  * Create shared RSS action.
14744  *
14745  * @param[in] dev
14746  *   Pointer to the Ethernet device structure.
14747  * @param[in] conf
14748  *   Shared action configuration.
14749  * @param[in] rss
14750  *   RSS action specification used to create shared action.
14751  * @param[out] error
14752  *   Perform verbose error reporting if not NULL. Initialized in case of
14753  *   error only.
14754  *
14755  * @return
14756  *   A valid shared action ID in case of success, 0 otherwise and
14757  *   rte_errno is set.
14758  */
14759 static uint32_t
14760 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14761                             const struct rte_flow_indir_action_conf *conf,
14762                             const struct rte_flow_action_rss *rss,
14763                             struct rte_flow_error *error)
14764 {
14765         struct mlx5_priv *priv = dev->data->dev_private;
14766         struct mlx5_shared_action_rss *shared_rss = NULL;
14767         void *queue = NULL;
14768         struct rte_flow_action_rss *origin;
14769         const uint8_t *rss_key;
14770         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14771         uint32_t idx;
14772
14773         RTE_SET_USED(conf);
14774         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14775                             0, SOCKET_ID_ANY);
14776         shared_rss = mlx5_ipool_zmalloc
14777                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14778         if (!shared_rss || !queue) {
14779                 rte_flow_error_set(error, ENOMEM,
14780                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14781                                    "cannot allocate resource memory");
14782                 goto error_rss_init;
14783         }
14784         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14785                 rte_flow_error_set(error, E2BIG,
14786                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14787                                    "rss action number out of range");
14788                 goto error_rss_init;
14789         }
14790         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14791                                           sizeof(*shared_rss->ind_tbl),
14792                                           0, SOCKET_ID_ANY);
14793         if (!shared_rss->ind_tbl) {
14794                 rte_flow_error_set(error, ENOMEM,
14795                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14796                                    "cannot allocate resource memory");
14797                 goto error_rss_init;
14798         }
14799         memcpy(queue, rss->queue, queue_size);
14800         shared_rss->ind_tbl->queues = queue;
14801         shared_rss->ind_tbl->queues_n = rss->queue_num;
14802         origin = &shared_rss->origin;
14803         origin->func = rss->func;
14804         origin->level = rss->level;
14805         /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
14806         origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
14807         /* NULL RSS key indicates default RSS key. */
14808         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14809         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14810         origin->key = &shared_rss->key[0];
14811         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14812         origin->queue = queue;
14813         origin->queue_num = rss->queue_num;
14814         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14815                 goto error_rss_init;
14816         rte_spinlock_init(&shared_rss->action_rss_sl);
14817         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14818         rte_spinlock_lock(&priv->shared_act_sl);
14819         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14820                      &priv->rss_shared_actions, idx, shared_rss, next);
14821         rte_spinlock_unlock(&priv->shared_act_sl);
14822         return idx;
14823 error_rss_init:
14824         if (shared_rss) {
14825                 if (shared_rss->ind_tbl)
14826                         mlx5_free(shared_rss->ind_tbl);
14827                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14828                                 idx);
14829         }
14830         if (queue)
14831                 mlx5_free(queue);
14832         return 0;
14833 }
14834
14835 /**
14836  * Destroy the shared RSS action.
14837  * Release related hash RX queue objects.
14838  *
14839  * @param[in] dev
14840  *   Pointer to the Ethernet device structure.
14841  * @param[in] idx
14842  *   The shared RSS action object ID to be removed.
14843  * @param[out] error
14844  *   Perform verbose error reporting if not NULL. Initialized in case of
14845  *   error only.
14846  *
14847  * @return
14848  *   0 on success, otherwise negative errno value.
14849  */
14850 static int
14851 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14852                              struct rte_flow_error *error)
14853 {
14854         struct mlx5_priv *priv = dev->data->dev_private;
14855         struct mlx5_shared_action_rss *shared_rss =
14856             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14857         uint32_t old_refcnt = 1;
14858         int remaining;
14859         uint16_t *queue = NULL;
14860
14861         if (!shared_rss)
14862                 return rte_flow_error_set(error, EINVAL,
14863                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14864                                           "invalid shared action");
14865         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14866                                          0, 0, __ATOMIC_ACQUIRE,
14867                                          __ATOMIC_RELAXED))
14868                 return rte_flow_error_set(error, EBUSY,
14869                                           RTE_FLOW_ERROR_TYPE_ACTION,
14870                                           NULL,
14871                                           "shared rss has references");
14872         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14873         if (remaining)
14874                 return rte_flow_error_set(error, EBUSY,
14875                                           RTE_FLOW_ERROR_TYPE_ACTION,
14876                                           NULL,
14877                                           "shared rss hrxq has references");
14878         queue = shared_rss->ind_tbl->queues;
14879         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
14880                                                !!dev->data->dev_started);
14881         if (remaining)
14882                 return rte_flow_error_set(error, EBUSY,
14883                                           RTE_FLOW_ERROR_TYPE_ACTION,
14884                                           NULL,
14885                                           "shared rss indirection table has"
14886                                           " references");
14887         mlx5_free(queue);
14888         rte_spinlock_lock(&priv->shared_act_sl);
14889         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14890                      &priv->rss_shared_actions, idx, shared_rss, next);
14891         rte_spinlock_unlock(&priv->shared_act_sl);
14892         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14893                         idx);
14894         return 0;
14895 }
14896
14897 /**
14898  * Create indirect action, lock free,
14899  * (mutex should be acquired by caller).
14900  * Dispatcher for action type specific call.
14901  *
14902  * @param[in] dev
14903  *   Pointer to the Ethernet device structure.
14904  * @param[in] conf
14905  *   Shared action configuration.
14906  * @param[in] action
14907  *   Action specification used to create indirect action.
14908  * @param[out] error
14909  *   Perform verbose error reporting if not NULL. Initialized in case of
14910  *   error only.
14911  *
14912  * @return
14913  *   A valid shared action handle in case of success, NULL otherwise and
14914  *   rte_errno is set.
14915  */
14916 static struct rte_flow_action_handle *
14917 flow_dv_action_create(struct rte_eth_dev *dev,
14918                       const struct rte_flow_indir_action_conf *conf,
14919                       const struct rte_flow_action *action,
14920                       struct rte_flow_error *err)
14921 {
14922         struct mlx5_priv *priv = dev->data->dev_private;
14923         uint32_t age_idx = 0;
14924         uint32_t idx = 0;
14925         uint32_t ret = 0;
14926
14927         switch (action->type) {
14928         case RTE_FLOW_ACTION_TYPE_RSS:
14929                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14930                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14931                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14932                 break;
14933         case RTE_FLOW_ACTION_TYPE_AGE:
14934                 age_idx = flow_dv_aso_age_alloc(dev, err);
14935                 if (!age_idx) {
14936                         ret = -rte_errno;
14937                         break;
14938                 }
14939                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14940                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14941                 flow_dv_aso_age_params_init(dev, age_idx,
14942                                         ((const struct rte_flow_action_age *)
14943                                                 action->conf)->context ?
14944                                         ((const struct rte_flow_action_age *)
14945                                                 action->conf)->context :
14946                                         (void *)(uintptr_t)idx,
14947                                         ((const struct rte_flow_action_age *)
14948                                                 action->conf)->timeout);
14949                 ret = age_idx;
14950                 break;
14951         case RTE_FLOW_ACTION_TYPE_COUNT:
14952                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14953                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14954                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14955                 break;
14956         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14957                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14958                                                          err);
14959                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14960                 break;
14961         default:
14962                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14963                                    NULL, "action type not supported");
14964                 break;
14965         }
14966         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14967 }
14968
14969 /**
14970  * Destroy the indirect action.
14971  * Release action related resources on the NIC and the memory.
14972  * Lock free, (mutex should be acquired by caller).
14973  * Dispatcher for action type specific call.
14974  *
14975  * @param[in] dev
14976  *   Pointer to the Ethernet device structure.
14977  * @param[in] handle
14978  *   The indirect action object handle to be removed.
14979  * @param[out] error
14980  *   Perform verbose error reporting if not NULL. Initialized in case of
14981  *   error only.
14982  *
14983  * @return
14984  *   0 on success, otherwise negative errno value.
14985  */
14986 static int
14987 flow_dv_action_destroy(struct rte_eth_dev *dev,
14988                        struct rte_flow_action_handle *handle,
14989                        struct rte_flow_error *error)
14990 {
14991         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14992         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14993         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14994         struct mlx5_flow_counter *cnt;
14995         uint32_t no_flow_refcnt = 1;
14996         int ret;
14997
14998         switch (type) {
14999         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15000                 return __flow_dv_action_rss_release(dev, idx, error);
15001         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15002                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
15003                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
15004                                                  &no_flow_refcnt, 1, false,
15005                                                  __ATOMIC_ACQUIRE,
15006                                                  __ATOMIC_RELAXED))
15007                         return rte_flow_error_set(error, EBUSY,
15008                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15009                                                   NULL,
15010                                                   "Indirect count action has references");
15011                 flow_dv_counter_free(dev, idx);
15012                 return 0;
15013         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15014                 ret = flow_dv_aso_age_release(dev, idx);
15015                 if (ret)
15016                         /*
15017                          * In this case, the last flow has a reference will
15018                          * actually release the age action.
15019                          */
15020                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
15021                                 " released with references %d.", idx, ret);
15022                 return 0;
15023         case MLX5_INDIRECT_ACTION_TYPE_CT:
15024                 ret = flow_dv_aso_ct_release(dev, idx, error);
15025                 if (ret < 0)
15026                         return ret;
15027                 if (ret > 0)
15028                         DRV_LOG(DEBUG, "Connection tracking object %u still "
15029                                 "has references %d.", idx, ret);
15030                 return 0;
15031         default:
15032                 return rte_flow_error_set(error, ENOTSUP,
15033                                           RTE_FLOW_ERROR_TYPE_ACTION,
15034                                           NULL,
15035                                           "action type not supported");
15036         }
15037 }
15038
15039 /**
15040  * Updates in place shared RSS action configuration.
15041  *
15042  * @param[in] dev
15043  *   Pointer to the Ethernet device structure.
15044  * @param[in] idx
15045  *   The shared RSS action object ID to be updated.
15046  * @param[in] action_conf
15047  *   RSS action specification used to modify *shared_rss*.
15048  * @param[out] error
15049  *   Perform verbose error reporting if not NULL. Initialized in case of
15050  *   error only.
15051  *
15052  * @return
15053  *   0 on success, otherwise negative errno value.
15054  * @note: currently only support update of RSS queues.
15055  */
15056 static int
15057 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
15058                             const struct rte_flow_action_rss *action_conf,
15059                             struct rte_flow_error *error)
15060 {
15061         struct mlx5_priv *priv = dev->data->dev_private;
15062         struct mlx5_shared_action_rss *shared_rss =
15063             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
15064         int ret = 0;
15065         void *queue = NULL;
15066         uint16_t *queue_old = NULL;
15067         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
15068         bool dev_started = !!dev->data->dev_started;
15069
15070         if (!shared_rss)
15071                 return rte_flow_error_set(error, EINVAL,
15072                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15073                                           "invalid shared action to update");
15074         if (priv->obj_ops.ind_table_modify == NULL)
15075                 return rte_flow_error_set(error, ENOTSUP,
15076                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15077                                           "cannot modify indirection table");
15078         queue = mlx5_malloc(MLX5_MEM_ZERO,
15079                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
15080                             0, SOCKET_ID_ANY);
15081         if (!queue)
15082                 return rte_flow_error_set(error, ENOMEM,
15083                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15084                                           NULL,
15085                                           "cannot allocate resource memory");
15086         memcpy(queue, action_conf->queue, queue_size);
15087         MLX5_ASSERT(shared_rss->ind_tbl);
15088         rte_spinlock_lock(&shared_rss->action_rss_sl);
15089         queue_old = shared_rss->ind_tbl->queues;
15090         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
15091                                         queue, action_conf->queue_num,
15092                                         true /* standalone */,
15093                                         dev_started /* ref_new_qs */,
15094                                         dev_started /* deref_old_qs */);
15095         if (ret) {
15096                 mlx5_free(queue);
15097                 ret = rte_flow_error_set(error, rte_errno,
15098                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15099                                           "cannot update indirection table");
15100         } else {
15101                 mlx5_free(queue_old);
15102                 shared_rss->origin.queue = queue;
15103                 shared_rss->origin.queue_num = action_conf->queue_num;
15104         }
15105         rte_spinlock_unlock(&shared_rss->action_rss_sl);
15106         return ret;
15107 }
15108
15109 /*
15110  * Updates in place conntrack context or direction.
15111  * Context update should be synchronized.
15112  *
15113  * @param[in] dev
15114  *   Pointer to the Ethernet device structure.
15115  * @param[in] idx
15116  *   The conntrack object ID to be updated.
15117  * @param[in] update
15118  *   Pointer to the structure of information to update.
15119  * @param[out] error
15120  *   Perform verbose error reporting if not NULL. Initialized in case of
15121  *   error only.
15122  *
15123  * @return
15124  *   0 on success, otherwise negative errno value.
15125  */
15126 static int
15127 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
15128                            const struct rte_flow_modify_conntrack *update,
15129                            struct rte_flow_error *error)
15130 {
15131         struct mlx5_priv *priv = dev->data->dev_private;
15132         struct mlx5_aso_ct_action *ct;
15133         const struct rte_flow_action_conntrack *new_prf;
15134         int ret = 0;
15135         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15136         uint32_t dev_idx;
15137
15138         if (PORT_ID(priv) != owner)
15139                 return rte_flow_error_set(error, EACCES,
15140                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15141                                           NULL,
15142                                           "CT object owned by another port");
15143         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15144         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15145         if (!ct->refcnt)
15146                 return rte_flow_error_set(error, ENOMEM,
15147                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15148                                           NULL,
15149                                           "CT object is inactive");
15150         new_prf = &update->new_ct;
15151         if (update->direction)
15152                 ct->is_original = !!new_prf->is_original_dir;
15153         if (update->state) {
15154                 /* Only validate the profile when it needs to be updated. */
15155                 ret = mlx5_validate_action_ct(dev, new_prf, error);
15156                 if (ret)
15157                         return ret;
15158                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
15159                 if (ret)
15160                         return rte_flow_error_set(error, EIO,
15161                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15162                                         NULL,
15163                                         "Failed to send CT context update WQE");
15164                 /* Block until ready or a failure. */
15165                 ret = mlx5_aso_ct_available(priv->sh, ct);
15166                 if (ret)
15167                         rte_flow_error_set(error, rte_errno,
15168                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15169                                            NULL,
15170                                            "Timeout to get the CT update");
15171         }
15172         return ret;
15173 }
15174
15175 /**
15176  * Updates in place shared action configuration, lock free,
15177  * (mutex should be acquired by caller).
15178  *
15179  * @param[in] dev
15180  *   Pointer to the Ethernet device structure.
15181  * @param[in] handle
15182  *   The indirect action object handle to be updated.
15183  * @param[in] update
15184  *   Action specification used to modify the action pointed by *handle*.
15185  *   *update* could be of same type with the action pointed by the *handle*
15186  *   handle argument, or some other structures like a wrapper, depending on
15187  *   the indirect action type.
15188  * @param[out] error
15189  *   Perform verbose error reporting if not NULL. Initialized in case of
15190  *   error only.
15191  *
15192  * @return
15193  *   0 on success, otherwise negative errno value.
15194  */
15195 static int
15196 flow_dv_action_update(struct rte_eth_dev *dev,
15197                         struct rte_flow_action_handle *handle,
15198                         const void *update,
15199                         struct rte_flow_error *err)
15200 {
15201         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15202         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15203         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15204         const void *action_conf;
15205
15206         switch (type) {
15207         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15208                 action_conf = ((const struct rte_flow_action *)update)->conf;
15209                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15210         case MLX5_INDIRECT_ACTION_TYPE_CT:
15211                 return __flow_dv_action_ct_update(dev, idx, update, err);
15212         default:
15213                 return rte_flow_error_set(err, ENOTSUP,
15214                                           RTE_FLOW_ERROR_TYPE_ACTION,
15215                                           NULL,
15216                                           "action type update not supported");
15217         }
15218 }
15219
/**
 * Destroy the meter sub policy table rules.
 * Lock free, (mutex should be acquired by caller).
 *
 * Releases, in order: every per-color rule (flow, matcher reference,
 * list entry), the per-color hrxq/jump-table references, and finally
 * the sub policy table resource itself.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] sub_policy
 *   Pointer to meter sub policy table.
 */
static void
__flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
			     struct mlx5_flow_meter_sub_policy *sub_policy)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_tbl_data_entry *tbl;
	struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
	struct mlx5_flow_meter_info *next_fm;
	struct mlx5_sub_policy_color_rule *color_rule;
	void *tmp;
	uint32_t i;

	for (i = 0; i < RTE_COLORS; i++) {
		next_fm = NULL;
		/*
		 * When the GREEN color of a policy jumps to another meter
		 * (hierarchy), look that meter up so its reference can be
		 * dropped once per destroyed color rule below.
		 */
		if (i == RTE_COLOR_GREEN && policy &&
		    policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
			next_fm = mlx5_flow_meter_find(priv,
					policy->act_cnt[i].next_mtr_id, NULL);
		/* Safe variant: entries are unlinked while iterating. */
		RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
				   next_port, tmp) {
			claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
			/* Drop the matcher reference held by this rule. */
			tbl = container_of(color_rule->matcher->tbl,
					   typeof(*tbl), tbl);
			mlx5_list_unregister(tbl->matchers,
					     &color_rule->matcher->entry);
			TAILQ_REMOVE(&sub_policy->color_rules[i],
				     color_rule, next_port);
			mlx5_free(color_rule);
			if (next_fm)
				mlx5_flow_meter_detach(priv, next_fm);
		}
	}
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (sub_policy->rix_hrxq[i]) {
			/* Hierarchy policies do not own the hrxq reference. */
			if (policy && !policy->is_hierarchy)
				mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
			sub_policy->rix_hrxq[i] = 0;
		}
		if (sub_policy->jump_tbl[i]) {
			flow_dv_tbl_resource_release(MLX5_SH(dev),
						     sub_policy->jump_tbl[i]);
			sub_policy->jump_tbl[i] = NULL;
		}
	}
	/* Release the table resource last; rules above referenced it. */
	if (sub_policy->tbl_rsc) {
		flow_dv_tbl_resource_release(MLX5_SH(dev),
					     sub_policy->tbl_rsc);
		sub_policy->tbl_rsc = NULL;
	}
}
15279
15280 /**
15281  * Destroy policy rules, lock free,
15282  * (mutex should be acquired by caller).
15283  * Dispatcher for action type specific call.
15284  *
15285  * @param[in] dev
15286  *   Pointer to the Ethernet device structure.
15287  * @param[in] mtr_policy
15288  *   Meter policy struct.
15289  */
15290 static void
15291 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15292                              struct mlx5_flow_meter_policy *mtr_policy)
15293 {
15294         uint32_t i, j;
15295         struct mlx5_flow_meter_sub_policy *sub_policy;
15296         uint16_t sub_policy_num;
15297
15298         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15299                 sub_policy_num = (mtr_policy->sub_policy_num >>
15300                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15301                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15302                 for (j = 0; j < sub_policy_num; j++) {
15303                         sub_policy = mtr_policy->sub_policys[i][j];
15304                         if (sub_policy)
15305                                 __flow_dv_destroy_sub_policy_rules(dev,
15306                                                                    sub_policy);
15307                 }
15308         }
15309 }
15310
15311 /**
15312  * Destroy policy action, lock free,
15313  * (mutex should be acquired by caller).
15314  * Dispatcher for action type specific call.
15315  *
15316  * @param[in] dev
15317  *   Pointer to the Ethernet device structure.
15318  * @param[in] mtr_policy
15319  *   Meter policy struct.
15320  */
15321 static void
15322 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15323                       struct mlx5_flow_meter_policy *mtr_policy)
15324 {
15325         struct rte_flow_action *rss_action;
15326         struct mlx5_flow_handle dev_handle;
15327         uint32_t i, j;
15328
15329         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15330                 if (mtr_policy->act_cnt[i].rix_mark) {
15331                         flow_dv_tag_release(dev,
15332                                 mtr_policy->act_cnt[i].rix_mark);
15333                         mtr_policy->act_cnt[i].rix_mark = 0;
15334                 }
15335                 if (mtr_policy->act_cnt[i].modify_hdr) {
15336                         dev_handle.dvh.modify_hdr =
15337                                 mtr_policy->act_cnt[i].modify_hdr;
15338                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15339                 }
15340                 switch (mtr_policy->act_cnt[i].fate_action) {
15341                 case MLX5_FLOW_FATE_SHARED_RSS:
15342                         rss_action = mtr_policy->act_cnt[i].rss;
15343                         mlx5_free(rss_action);
15344                         break;
15345                 case MLX5_FLOW_FATE_PORT_ID:
15346                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15347                                 flow_dv_port_id_action_resource_release(dev,
15348                                 mtr_policy->act_cnt[i].rix_port_id_action);
15349                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15350                         }
15351                         break;
15352                 case MLX5_FLOW_FATE_DROP:
15353                 case MLX5_FLOW_FATE_JUMP:
15354                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15355                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15356                                                 NULL;
15357                         break;
15358                 default:
15359                         /*Queue action do nothing*/
15360                         break;
15361                 }
15362         }
15363         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15364                 mtr_policy->dr_drop_action[j] = NULL;
15365 }
15366
15367 /**
15368  * Create policy action per domain, lock free,
15369  * (mutex should be acquired by caller).
15370  * Dispatcher for action type specific call.
15371  *
15372  * @param[in] dev
15373  *   Pointer to the Ethernet device structure.
15374  * @param[in] mtr_policy
15375  *   Meter policy struct.
15376  * @param[in] action
15377  *   Action specification used to create meter actions.
15378  * @param[out] error
15379  *   Perform verbose error reporting if not NULL. Initialized in case of
15380  *   error only.
15381  *
15382  * @return
15383  *   0 on success, otherwise negative errno value.
15384  */
15385 static int
15386 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15387                         struct mlx5_flow_meter_policy *mtr_policy,
15388                         const struct rte_flow_action *actions[RTE_COLORS],
15389                         enum mlx5_meter_domain domain,
15390                         struct rte_mtr_error *error)
15391 {
15392         struct mlx5_priv *priv = dev->data->dev_private;
15393         struct rte_flow_error flow_err;
15394         const struct rte_flow_action *act;
15395         uint64_t action_flags;
15396         struct mlx5_flow_handle dh;
15397         struct mlx5_flow dev_flow;
15398         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15399         int i, ret;
15400         uint8_t egress, transfer;
15401         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15402         union {
15403                 struct mlx5_flow_dv_modify_hdr_resource res;
15404                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15405                             sizeof(struct mlx5_modification_cmd) *
15406                             (MLX5_MAX_MODIFY_NUM + 1)];
15407         } mhdr_dummy;
15408         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15409         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
15410
15411         MLX5_ASSERT(wks);
15412         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15413         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15414         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15415         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15416         memset(&port_id_action, 0,
15417                sizeof(struct mlx5_flow_dv_port_id_action_resource));
15418         memset(mhdr_res, 0, sizeof(*mhdr_res));
15419         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15420                                        (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15421                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15422         dev_flow.handle = &dh;
15423         dev_flow.dv.port_id_action = &port_id_action;
15424         dev_flow.external = true;
15425         for (i = 0; i < RTE_COLORS; i++) {
15426                 if (i < MLX5_MTR_RTE_COLORS)
15427                         act_cnt = &mtr_policy->act_cnt[i];
15428                 /* Skip the color policy actions creation. */
15429                 if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15430                     (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15431                         continue;
15432                 action_flags = 0;
15433                 for (act = actions[i];
15434                      act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15435                         switch (act->type) {
15436                         case RTE_FLOW_ACTION_TYPE_MARK:
15437                         {
15438                                 uint32_t tag_be = mlx5_flow_mark_set
15439                                         (((const struct rte_flow_action_mark *)
15440                                         (act->conf))->id);
15441
15442                                 if (i >= MLX5_MTR_RTE_COLORS)
15443                                         return -rte_mtr_error_set(error,
15444                                           ENOTSUP,
15445                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15446                                           NULL,
15447                                           "cannot create policy "
15448                                           "mark action for this color");
15449                                 wks->mark = 1;
15450                                 if (flow_dv_tag_resource_register(dev, tag_be,
15451                                                   &dev_flow, &flow_err))
15452                                         return -rte_mtr_error_set(error,
15453                                         ENOTSUP,
15454                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15455                                         NULL,
15456                                         "cannot setup policy mark action");
15457                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15458                                 act_cnt->rix_mark =
15459                                         dev_flow.handle->dvh.rix_tag;
15460                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15461                                 break;
15462                         }
15463                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15464                                 if (i >= MLX5_MTR_RTE_COLORS)
15465                                         return -rte_mtr_error_set(error,
15466                                           ENOTSUP,
15467                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15468                                           NULL,
15469                                           "cannot create policy "
15470                                           "set tag action for this color");
15471                                 if (flow_dv_convert_action_set_tag
15472                                 (dev, mhdr_res,
15473                                 (const struct rte_flow_action_set_tag *)
15474                                 act->conf,  &flow_err))
15475                                         return -rte_mtr_error_set(error,
15476                                         ENOTSUP,
15477                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15478                                         NULL, "cannot convert policy "
15479                                         "set tag action");
15480                                 if (!mhdr_res->actions_num)
15481                                         return -rte_mtr_error_set(error,
15482                                         ENOTSUP,
15483                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15484                                         NULL, "cannot find policy "
15485                                         "set tag action");
15486                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15487                                 break;
15488                         case RTE_FLOW_ACTION_TYPE_DROP:
15489                         {
15490                                 struct mlx5_flow_mtr_mng *mtrmng =
15491                                                 priv->sh->mtrmng;
15492                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15493
15494                                 /*
15495                                  * Create the drop table with
15496                                  * METER DROP level.
15497                                  */
15498                                 if (!mtrmng->drop_tbl[domain]) {
15499                                         mtrmng->drop_tbl[domain] =
15500                                         flow_dv_tbl_resource_get(dev,
15501                                         MLX5_FLOW_TABLE_LEVEL_METER,
15502                                         egress, transfer, false, NULL, 0,
15503                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15504                                         if (!mtrmng->drop_tbl[domain])
15505                                                 return -rte_mtr_error_set
15506                                         (error, ENOTSUP,
15507                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15508                                         NULL,
15509                                         "Failed to create meter drop table");
15510                                 }
15511                                 tbl_data = container_of
15512                                 (mtrmng->drop_tbl[domain],
15513                                 struct mlx5_flow_tbl_data_entry, tbl);
15514                                 if (i < MLX5_MTR_RTE_COLORS) {
15515                                         act_cnt->dr_jump_action[domain] =
15516                                                 tbl_data->jump.action;
15517                                         act_cnt->fate_action =
15518                                                 MLX5_FLOW_FATE_DROP;
15519                                 }
15520                                 if (i == RTE_COLOR_RED)
15521                                         mtr_policy->dr_drop_action[domain] =
15522                                                 tbl_data->jump.action;
15523                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15524                                 break;
15525                         }
15526                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15527                         {
15528                                 if (i >= MLX5_MTR_RTE_COLORS)
15529                                         return -rte_mtr_error_set(error,
15530                                         ENOTSUP,
15531                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15532                                         NULL, "cannot create policy "
15533                                         "fate queue for this color");
15534                                 act_cnt->queue =
15535                                 ((const struct rte_flow_action_queue *)
15536                                         (act->conf))->index;
15537                                 act_cnt->fate_action =
15538                                         MLX5_FLOW_FATE_QUEUE;
15539                                 dev_flow.handle->fate_action =
15540                                         MLX5_FLOW_FATE_QUEUE;
15541                                 mtr_policy->is_queue = 1;
15542                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15543                                 break;
15544                         }
15545                         case RTE_FLOW_ACTION_TYPE_RSS:
15546                         {
15547                                 int rss_size;
15548
15549                                 if (i >= MLX5_MTR_RTE_COLORS)
15550                                         return -rte_mtr_error_set(error,
15551                                           ENOTSUP,
15552                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15553                                           NULL,
15554                                           "cannot create policy "
15555                                           "rss action for this color");
15556                                 /*
15557                                  * Save RSS conf into policy struct
15558                                  * for translate stage.
15559                                  */
15560                                 rss_size = (int)rte_flow_conv
15561                                         (RTE_FLOW_CONV_OP_ACTION,
15562                                         NULL, 0, act, &flow_err);
15563                                 if (rss_size <= 0)
15564                                         return -rte_mtr_error_set(error,
15565                                           ENOTSUP,
15566                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15567                                           NULL, "Get the wrong "
15568                                           "rss action struct size");
15569                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15570                                                 rss_size, 0, SOCKET_ID_ANY);
15571                                 if (!act_cnt->rss)
15572                                         return -rte_mtr_error_set(error,
15573                                           ENOTSUP,
15574                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15575                                           NULL,
15576                                           "Fail to malloc rss action memory");
15577                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15578                                         act_cnt->rss, rss_size,
15579                                         act, &flow_err);
15580                                 if (ret < 0)
15581                                         return -rte_mtr_error_set(error,
15582                                           ENOTSUP,
15583                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15584                                           NULL, "Fail to save "
15585                                           "rss action into policy struct");
15586                                 act_cnt->fate_action =
15587                                         MLX5_FLOW_FATE_SHARED_RSS;
15588                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15589                                 break;
15590                         }
15591                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15592                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
15593                         {
15594                                 struct mlx5_flow_dv_port_id_action_resource
15595                                         port_id_resource;
15596                                 uint32_t port_id = 0;
15597
15598                                 if (i >= MLX5_MTR_RTE_COLORS)
15599                                         return -rte_mtr_error_set(error,
15600                                         ENOTSUP,
15601                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15602                                         NULL, "cannot create policy "
15603                                         "port action for this color");
15604                                 memset(&port_id_resource, 0,
15605                                         sizeof(port_id_resource));
15606                                 if (flow_dv_translate_action_port_id(dev, act,
15607                                                 &port_id, &flow_err))
15608                                         return -rte_mtr_error_set(error,
15609                                         ENOTSUP,
15610                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15611                                         NULL, "cannot translate "
15612                                         "policy port action");
15613                                 port_id_resource.port_id = port_id;
15614                                 if (flow_dv_port_id_action_resource_register
15615                                         (dev, &port_id_resource,
15616                                         &dev_flow, &flow_err))
15617                                         return -rte_mtr_error_set(error,
15618                                         ENOTSUP,
15619                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15620                                         NULL, "cannot setup "
15621                                         "policy port action");
15622                                 act_cnt->rix_port_id_action =
15623                                         dev_flow.handle->rix_port_id_action;
15624                                 act_cnt->fate_action =
15625                                         MLX5_FLOW_FATE_PORT_ID;
15626                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15627                                 break;
15628                         }
15629                         case RTE_FLOW_ACTION_TYPE_JUMP:
15630                         {
15631                                 uint32_t jump_group = 0;
15632                                 uint32_t table = 0;
15633                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15634                                 struct flow_grp_info grp_info = {
15635                                         .external = !!dev_flow.external,
15636                                         .transfer = !!transfer,
15637                                         .fdb_def_rule = !!priv->fdb_def_rule,
15638                                         .std_tbl_fix = 0,
15639                                         .skip_scale = dev_flow.skip_scale &
15640                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15641                                 };
15642                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15643                                         mtr_policy->sub_policys[domain][0];
15644
15645                                 if (i >= MLX5_MTR_RTE_COLORS)
15646                                         return -rte_mtr_error_set(error,
15647                                           ENOTSUP,
15648                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15649                                           NULL,
15650                                           "cannot create policy "
15651                                           "jump action for this color");
15652                                 jump_group =
15653                                 ((const struct rte_flow_action_jump *)
15654                                                         act->conf)->group;
15655                                 if (mlx5_flow_group_to_table(dev, NULL,
15656                                                        jump_group,
15657                                                        &table,
15658                                                        &grp_info, &flow_err))
15659                                         return -rte_mtr_error_set(error,
15660                                         ENOTSUP,
15661                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15662                                         NULL, "cannot setup "
15663                                         "policy jump action");
15664                                 sub_policy->jump_tbl[i] =
15665                                 flow_dv_tbl_resource_get(dev,
15666                                         table, egress,
15667                                         transfer,
15668                                         !!dev_flow.external,
15669                                         NULL, jump_group, 0,
15670                                         0, &flow_err);
15671                                 if
15672                                 (!sub_policy->jump_tbl[i])
15673                                         return  -rte_mtr_error_set(error,
15674                                         ENOTSUP,
15675                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15676                                         NULL, "cannot create jump action.");
15677                                 tbl_data = container_of
15678                                 (sub_policy->jump_tbl[i],
15679                                 struct mlx5_flow_tbl_data_entry, tbl);
15680                                 act_cnt->dr_jump_action[domain] =
15681                                         tbl_data->jump.action;
15682                                 act_cnt->fate_action =
15683                                         MLX5_FLOW_FATE_JUMP;
15684                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15685                                 break;
15686                         }
15687                         /*
15688                          * No need to check meter hierarchy for Y or R colors
15689                          * here since it is done in the validation stage.
15690                          */
15691                         case RTE_FLOW_ACTION_TYPE_METER:
15692                         {
15693                                 const struct rte_flow_action_meter *mtr;
15694                                 struct mlx5_flow_meter_info *next_fm;
15695                                 struct mlx5_flow_meter_policy *next_policy;
15696                                 struct rte_flow_action tag_action;
15697                                 struct mlx5_rte_flow_action_set_tag set_tag;
15698                                 uint32_t next_mtr_idx = 0;
15699
15700                                 mtr = act->conf;
15701                                 next_fm = mlx5_flow_meter_find(priv,
15702                                                         mtr->mtr_id,
15703                                                         &next_mtr_idx);
15704                                 if (!next_fm)
15705                                         return -rte_mtr_error_set(error, EINVAL,
15706                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15707                                                 "Fail to find next meter.");
15708                                 if (next_fm->def_policy)
15709                                         return -rte_mtr_error_set(error, EINVAL,
15710                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15711                                 "Hierarchy only supports termination meter.");
15712                                 next_policy = mlx5_flow_meter_policy_find(dev,
15713                                                 next_fm->policy_id, NULL);
15714                                 MLX5_ASSERT(next_policy);
15715                                 if (next_fm->drop_cnt) {
15716                                         set_tag.id =
15717                                                 (enum modify_reg)
15718                                                 mlx5_flow_get_reg_id(dev,
15719                                                 MLX5_MTR_ID,
15720                                                 0,
15721                                                 (struct rte_flow_error *)error);
15722                                         set_tag.offset = (priv->mtr_reg_share ?
15723                                                 MLX5_MTR_COLOR_BITS : 0);
15724                                         set_tag.length = (priv->mtr_reg_share ?
15725                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15726                                                MLX5_REG_BITS);
15727                                         set_tag.data = next_mtr_idx;
15728                                         tag_action.type =
15729                                                 (enum rte_flow_action_type)
15730                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15731                                         tag_action.conf = &set_tag;
15732                                         if (flow_dv_convert_action_set_reg
15733                                                 (mhdr_res, &tag_action,
15734                                                 (struct rte_flow_error *)error))
15735                                                 return -rte_errno;
15736                                         action_flags |=
15737                                                 MLX5_FLOW_ACTION_SET_TAG;
15738                                 }
15739                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15740                                 act_cnt->next_mtr_id = next_fm->meter_id;
15741                                 act_cnt->next_sub_policy = NULL;
15742                                 mtr_policy->is_hierarchy = 1;
15743                                 mtr_policy->dev = next_policy->dev;
15744                                 action_flags |=
15745                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15746                                 break;
15747                         }
15748                         default:
15749                                 return -rte_mtr_error_set(error, ENOTSUP,
15750                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15751                                           NULL, "action type not supported");
15752                         }
15753                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15754                                 /* create modify action if needed. */
15755                                 dev_flow.dv.group = 1;
15756                                 if (flow_dv_modify_hdr_resource_register
15757                                         (dev, mhdr_res, &dev_flow, &flow_err))
15758                                         return -rte_mtr_error_set(error,
15759                                                 ENOTSUP,
15760                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15761                                                 NULL, "cannot register policy "
15762                                                 "set tag action");
15763                                 act_cnt->modify_hdr =
15764                                         dev_flow.handle->dvh.modify_hdr;
15765                         }
15766                 }
15767         }
15768         return 0;
15769 }
15770
15771 /**
15772  * Create policy action per domain, lock free,
15773  * (mutex should be acquired by caller).
15774  * Dispatcher for action type specific call.
15775  *
15776  * @param[in] dev
15777  *   Pointer to the Ethernet device structure.
15778  * @param[in] mtr_policy
15779  *   Meter policy struct.
15780  * @param[in] action
15781  *   Action specification used to create meter actions.
15782  * @param[out] error
15783  *   Perform verbose error reporting if not NULL. Initialized in case of
15784  *   error only.
15785  *
15786  * @return
15787  *   0 on success, otherwise negative errno value.
15788  */
15789 static int
15790 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15791                       struct mlx5_flow_meter_policy *mtr_policy,
15792                       const struct rte_flow_action *actions[RTE_COLORS],
15793                       struct rte_mtr_error *error)
15794 {
15795         int ret, i;
15796         uint16_t sub_policy_num;
15797
15798         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15799                 sub_policy_num = (mtr_policy->sub_policy_num >>
15800                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15801                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15802                 if (sub_policy_num) {
15803                         ret = __flow_dv_create_domain_policy_acts(dev,
15804                                 mtr_policy, actions,
15805                                 (enum mlx5_meter_domain)i, error);
15806                         /* Cleaning resource is done in the caller level. */
15807                         if (ret)
15808                                 return ret;
15809                 }
15810         }
15811         return 0;
15812 }
15813
15814 /**
15815  * Query a DV flow rule for its statistics via DevX.
15816  *
15817  * @param[in] dev
15818  *   Pointer to Ethernet device.
15819  * @param[in] cnt_idx
15820  *   Index to the flow counter.
15821  * @param[out] data
15822  *   Data retrieved by the query.
15823  * @param[out] error
15824  *   Perform verbose error reporting if not NULL.
15825  *
15826  * @return
15827  *   0 on success, a negative errno value otherwise and rte_errno is set.
15828  */
15829 int
15830 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15831                     struct rte_flow_error *error)
15832 {
15833         struct mlx5_priv *priv = dev->data->dev_private;
15834         struct rte_flow_query_count *qc = data;
15835
15836         if (!priv->sh->devx)
15837                 return rte_flow_error_set(error, ENOTSUP,
15838                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15839                                           NULL,
15840                                           "counters are not supported");
15841         if (cnt_idx) {
15842                 uint64_t pkts, bytes;
15843                 struct mlx5_flow_counter *cnt;
15844                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15845
15846                 if (err)
15847                         return rte_flow_error_set(error, -err,
15848                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15849                                         NULL, "cannot read counters");
15850                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15851                 qc->hits_set = 1;
15852                 qc->bytes_set = 1;
15853                 qc->hits = pkts - cnt->hits;
15854                 qc->bytes = bytes - cnt->bytes;
15855                 if (qc->reset) {
15856                         cnt->hits = pkts;
15857                         cnt->bytes = bytes;
15858                 }
15859                 return 0;
15860         }
15861         return rte_flow_error_set(error, EINVAL,
15862                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15863                                   NULL,
15864                                   "counters are not available");
15865 }
15866
15867
15868 /**
15869  * Query counter's action pointer for a DV flow rule via DevX.
15870  *
15871  * @param[in] dev
15872  *   Pointer to Ethernet device.
15873  * @param[in] cnt_idx
15874  *   Index to the flow counter.
15875  * @param[out] action_ptr
15876  *   Action pointer for counter.
15877  * @param[out] error
15878  *   Perform verbose error reporting if not NULL.
15879  *
15880  * @return
15881  *   0 on success, a negative errno value otherwise and rte_errno is set.
15882  */
15883 int
15884 flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
15885         void **action_ptr, struct rte_flow_error *error)
15886 {
15887         struct mlx5_priv *priv = dev->data->dev_private;
15888
15889         if (!priv->sh->devx || !action_ptr)
15890                 return rte_flow_error_set(error, ENOTSUP,
15891                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15892                                           NULL,
15893                                           "counters are not supported");
15894
15895         if (cnt_idx) {
15896                 struct mlx5_flow_counter *cnt = NULL;
15897                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15898                 if (cnt) {
15899                         *action_ptr = cnt->action;
15900                         return 0;
15901                 }
15902         }
15903         return rte_flow_error_set(error, EINVAL,
15904                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15905                                   NULL,
15906                                   "counters are not available");
15907 }
15908
/**
 * Query an indirect (shared) action.
 *
 * The action type and the per-type index are both packed into the opaque
 * handle value; they are unpacked here and dispatched per type.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] handle
 *   Indirect action handle (encodes type and index, not a real pointer).
 * @param[out] data
 *   Query result; the expected layout depends on the action type
 *   (rte_flow_query_age, rte_flow_query_count or
 *   rte_flow_action_conntrack).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_action_query(struct rte_eth_dev *dev,
		     const struct rte_flow_action_handle *handle, void *data,
		     struct rte_flow_error *error)
{
	struct mlx5_age_param *age_param;
	struct rte_flow_query_age *resp;
	/* The handle is an encoded integer, not a dereferenceable pointer. */
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;
	uint16_t owner;
	uint32_t dev_idx;

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
		resp = data;
		resp->aged = __atomic_load_n(&age_param->state,
					      __ATOMIC_RELAXED) == AGE_TMOUT ?
									  1 : 0;
		/* Last-hit time is only meaningful while not yet aged out. */
		resp->sec_since_last_hit_valid = !resp->aged;
		if (resp->sec_since_last_hit_valid)
			resp->sec_since_last_hit = __atomic_load_n
			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		return flow_dv_query_count(dev, idx, data, error);
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		/* CT objects may be shared; only the owner port may query. */
		owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
		if (owner != PORT_ID(priv))
			return rte_flow_error_set(error, EACCES,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object owned by another port");
		dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
		ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
		MLX5_ASSERT(ct);
		if (!ct->refcnt)
			return rte_flow_error_set(error, EFAULT,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object is inactive");
		/* Fill in the locally cached fields before the HW query. */
		((struct rte_flow_action_conntrack *)data)->peer_port =
							ct->peer;
		((struct rte_flow_action_conntrack *)data)->is_original_dir =
							ct->is_original;
		/* Query the rest of the CT context from HW via an ASO WQE. */
		if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
			return rte_flow_error_set(error, EIO,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"Failed to query CT context");
		return 0;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "action type query not supported");
	}
}
15969
15970 /**
15971  * Query a flow rule AGE action for aging information.
15972  *
15973  * @param[in] dev
15974  *   Pointer to Ethernet device.
15975  * @param[in] flow
15976  *   Pointer to the sub flow.
15977  * @param[out] data
15978  *   data retrieved by the query.
15979  * @param[out] error
15980  *   Perform verbose error reporting if not NULL.
15981  *
15982  * @return
15983  *   0 on success, a negative errno value otherwise and rte_errno is set.
15984  */
15985 static int
15986 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15987                   void *data, struct rte_flow_error *error)
15988 {
15989         struct rte_flow_query_age *resp = data;
15990         struct mlx5_age_param *age_param;
15991
15992         if (flow->age) {
15993                 struct mlx5_aso_age_action *act =
15994                                      flow_aso_age_get_by_idx(dev, flow->age);
15995
15996                 age_param = &act->age_params;
15997         } else if (flow->counter) {
15998                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15999
16000                 if (!age_param || !age_param->timeout)
16001                         return rte_flow_error_set
16002                                         (error, EINVAL,
16003                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16004                                          NULL, "cannot read age data");
16005         } else {
16006                 return rte_flow_error_set(error, EINVAL,
16007                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16008                                           NULL, "age data not available");
16009         }
16010         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
16011                                      AGE_TMOUT ? 1 : 0;
16012         resp->sec_since_last_hit_valid = !resp->aged;
16013         if (resp->sec_since_last_hit_valid)
16014                 resp->sec_since_last_hit = __atomic_load_n
16015                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
16016         return 0;
16017 }
16018
16019 /**
16020  * Query a flow.
16021  *
16022  * @see rte_flow_query()
16023  * @see rte_flow_ops
16024  */
16025 static int
16026 flow_dv_query(struct rte_eth_dev *dev,
16027               struct rte_flow *flow __rte_unused,
16028               const struct rte_flow_action *actions __rte_unused,
16029               void *data __rte_unused,
16030               struct rte_flow_error *error __rte_unused)
16031 {
16032         int ret = -EINVAL;
16033
16034         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
16035                 switch (actions->type) {
16036                 case RTE_FLOW_ACTION_TYPE_VOID:
16037                         break;
16038                 case RTE_FLOW_ACTION_TYPE_COUNT:
16039                         ret = flow_dv_query_count(dev, flow->counter, data,
16040                                                   error);
16041                         break;
16042                 case RTE_FLOW_ACTION_TYPE_AGE:
16043                         ret = flow_dv_query_age(dev, flow, data, error);
16044                         break;
16045                 default:
16046                         return rte_flow_error_set(error, ENOTSUP,
16047                                                   RTE_FLOW_ERROR_TYPE_ACTION,
16048                                                   actions,
16049                                                   "action not supported");
16050                 }
16051         }
16052         return ret;
16053 }
16054
16055 /**
16056  * Destroy the meter table set.
16057  * Lock free, (mutex should be acquired by caller).
16058  *
16059  * @param[in] dev
16060  *   Pointer to Ethernet device.
16061  * @param[in] fm
16062  *   Meter information table.
16063  */
16064 static void
16065 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
16066                         struct mlx5_flow_meter_info *fm)
16067 {
16068         struct mlx5_priv *priv = dev->data->dev_private;
16069         int i;
16070
16071         if (!fm || !priv->config.dv_flow_en)
16072                 return;
16073         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16074                 if (fm->drop_rule[i]) {
16075                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
16076                         fm->drop_rule[i] = NULL;
16077                 }
16078         }
16079 }
16080
/**
 * Destroy the global meter drop table set: default rules and matchers
 * first, then per-domain drop matchers, and finally the drop tables
 * themselves (rules and matchers reference the tables, so they must be
 * released before the tables).
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */
static void
flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
	struct mlx5_flow_tbl_data_entry *tbl;
	int i, j;

	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
		/* Destroy the default rule before its matcher. */
		if (mtrmng->def_rule[i]) {
			claim_zero(mlx5_flow_os_destroy_flow
					(mtrmng->def_rule[i]));
			mtrmng->def_rule[i] = NULL;
		}
		if (mtrmng->def_matcher[i]) {
			tbl = container_of(mtrmng->def_matcher[i]->tbl,
				struct mlx5_flow_tbl_data_entry, tbl);
			mlx5_list_unregister(tbl->matchers,
					     &mtrmng->def_matcher[i]->entry);
			mtrmng->def_matcher[i] = NULL;
		}
		/* One drop matcher may exist per meter register bit. */
		for (j = 0; j < MLX5_REG_BITS; j++) {
			if (mtrmng->drop_matcher[i][j]) {
				tbl =
				container_of(mtrmng->drop_matcher[i][j]->tbl,
					     struct mlx5_flow_tbl_data_entry,
					     tbl);
				mlx5_list_unregister(tbl->matchers,
					    &mtrmng->drop_matcher[i][j]->entry);
				mtrmng->drop_matcher[i][j] = NULL;
			}
		}
		/* Release the table only after all its matchers are gone. */
		if (mtrmng->drop_tbl[i]) {
			flow_dv_tbl_resource_release(MLX5_SH(dev),
				mtrmng->drop_tbl[i]);
			mtrmng->drop_tbl[i] = NULL;
		}
	}
}
16120
16121 /* Number of meter flow actions, count and jump or count and drop. */
16122 #define METER_ACTIONS 2
16123
16124 static void
16125 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
16126                                     enum mlx5_meter_domain domain)
16127 {
16128         struct mlx5_priv *priv = dev->data->dev_private;
16129         struct mlx5_flow_meter_def_policy *def_policy =
16130                         priv->sh->mtrmng->def_policy[domain];
16131
16132         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
16133         mlx5_free(def_policy);
16134         priv->sh->mtrmng->def_policy[domain] = NULL;
16135 }
16136
16137 /**
16138  * Destroy the default policy table set.
16139  *
16140  * @param[in] dev
16141  *   Pointer to Ethernet device.
16142  */
16143 static void
16144 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
16145 {
16146         struct mlx5_priv *priv = dev->data->dev_private;
16147         int i;
16148
16149         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
16150                 if (priv->sh->mtrmng->def_policy[i])
16151                         __flow_dv_destroy_domain_def_policy(dev,
16152                                         (enum mlx5_meter_domain)i);
16153         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
16154 }
16155
16156 static int
16157 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
16158                         uint32_t color_reg_c_idx,
16159                         enum rte_color color, void *matcher_object,
16160                         int actions_n, void *actions,
16161                         bool match_src_port, const struct rte_flow_item *item,
16162                         void **rule, const struct rte_flow_attr *attr)
16163 {
16164         int ret;
16165         struct mlx5_flow_dv_match_params value = {
16166                 .size = sizeof(value.buf),
16167         };
16168         struct mlx5_flow_dv_match_params matcher = {
16169                 .size = sizeof(matcher.buf),
16170         };
16171         struct mlx5_priv *priv = dev->data->dev_private;
16172         uint8_t misc_mask;
16173
16174         if (match_src_port && (priv->representor || priv->master)) {
16175                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
16176                                                    value.buf, item, attr)) {
16177                         DRV_LOG(ERR, "Failed to create meter policy%d flow's"
16178                                 " value with port.", color);
16179                         return -1;
16180                 }
16181         }
16182         flow_dv_match_meta_reg(matcher.buf, value.buf,
16183                                (enum modify_reg)color_reg_c_idx,
16184                                rte_col_2_mlx5_col(color), UINT32_MAX);
16185         misc_mask = flow_dv_matcher_enable(value.buf);
16186         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16187         ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
16188                                        actions_n, actions, rule);
16189         if (ret) {
16190                 DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
16191                 return -1;
16192         }
16193         return 0;
16194 }
16195
/**
 * Register (or reuse) a matcher for one policy color rule in the sub-policy
 * table.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] color_reg_c_idx
 *   Index of the REG_C register carrying the meter color.
 * @param[in] priority
 *   Matcher priority; here it also encodes the color
 *   (see the RTE_COLOR_RED check below).
 * @param[in] sub_policy
 *   Sub-policy owning the table the matcher is created in.
 * @param[in] attr
 *   Flow attributes.
 * @param[in] match_src_port
 *   Whether to also match on the source port (E-Switch).
 * @param[in] item
 *   Port item used when matching the source port.
 * @param[out] policy_matcher
 *   Registered matcher on success.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
			uint32_t color_reg_c_idx,
			uint16_t priority,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			const struct rte_flow_attr *attr,
			bool match_src_port,
			const struct rte_flow_item *item,
			struct mlx5_flow_dv_matcher **policy_matcher,
			struct rte_flow_error *error)
{
	struct mlx5_list_entry *entry;
	struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
		.tbl = tbl_rsc,
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = &matcher,
	};
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_priv *priv = dev->data->dev_private;
	const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;

	/* Source port matching is only relevant on representor/master. */
	if (match_src_port && (priv->representor || priv->master)) {
		if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
						   value.buf, item, attr)) {
			DRV_LOG(ERR, "Failed to register meter policy%d matcher"
				" with port.", priority);
			return -1;
		}
	}
	tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
	/*
	 * Match on the color register only for priorities below
	 * RTE_COLOR_RED; the RED-priority matcher carries no color match.
	 */
	if (priority < RTE_COLOR_RED)
		flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
			(enum modify_reg)color_reg_c_idx, 0, color_mask);
	matcher.priority = priority;
	/* CRC of the mask is the lookup key in the matcher list. */
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
				    matcher.mask.size);
	entry = mlx5_list_register(tbl_data->matchers, &ctx);
	if (!entry) {
		DRV_LOG(ERR, "Failed to register meter drop matcher.");
		return -1;
	}
	*policy_matcher =
		container_of(entry, struct mlx5_flow_dv_matcher, entry);
	return 0;
}
16250
/**
 * Create the policy rules per domain.
 *
 * For each color that has a prepared action list, allocates a color rule,
 * creates a matcher on the policy table and a flow rule matching the meter
 * color register. On any failure, every rule/matcher created so far is
 * rolled back in reverse order.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] sub_policy
 *   Pointer to sub policy table.
 * @param[in] egress
 *   Direction of the table.
 * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[in] match_src_port
 *   Whether to match on the source port; never applied to the RED color
 *   (see svport_match below).
 * @param[in] acts
 *   Pointer to policy action list per color.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_sub_policy *sub_policy,
		uint8_t egress, uint8_t transfer, bool match_src_port,
		struct mlx5_meter_policy_acts acts[RTE_COLORS])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_error flow_err;
	uint32_t color_reg_c_idx;
	struct rte_flow_attr attr = {
		.group = MLX5_FLOW_TABLE_LEVEL_POLICY,
		.priority = 0,
		.ingress = 0,
		.egress = !!egress,
		.transfer = !!transfer,
		.reserved = 0,
	};
	int i;
	/* Resolve which REG_C register carries the meter color. */
	int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
	struct mlx5_sub_policy_color_rule *color_rule;
	bool svport_match;
	/* Track per-color rules created here, for rollback on error. */
	struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};

	if (ret < 0)
		return -1;
	/* Create policy table with POLICY level. */
	if (!sub_policy->tbl_rsc)
		sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_POLICY,
				egress, transfer, false, NULL, 0, 0,
				sub_policy->idx, &flow_err);
	if (!sub_policy->tbl_rsc) {
		DRV_LOG(ERR,
			"Failed to create meter sub policy table.");
		return -1;
	}
	/* Prepare matchers. */
	color_reg_c_idx = ret;
	for (i = 0; i < RTE_COLORS; i++) {
		TAILQ_INIT(&sub_policy->color_rules[i]);
		/* Skip colors with no prepared actions. */
		if (!acts[i].actions_n)
			continue;
		color_rule = mlx5_malloc(MLX5_MEM_ZERO,
				sizeof(struct mlx5_sub_policy_color_rule),
				0, SOCKET_ID_ANY);
		if (!color_rule) {
			DRV_LOG(ERR, "No memory to create color rule.");
			goto err_exit;
		}
		tmp_rules[i] = color_rule;
		TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
				  color_rule, next_port);
		color_rule->src_port = priv->representor_id;
		/* No use. */
		attr.priority = i;
		/* Create matchers for colors. */
		/* RED never matches the source port, other colors follow the flag. */
		svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
		if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
				MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
				&attr, svport_match, NULL,
				&color_rule->matcher, &flow_err)) {
			DRV_LOG(ERR, "Failed to create color%u matcher.", i);
			goto err_exit;
		}
		/* Create flow, matching color. */
		if (__flow_dv_create_policy_flow(dev,
				color_reg_c_idx, (enum rte_color)i,
				color_rule->matcher->matcher_object,
				acts[i].actions_n, acts[i].dv_actions,
				svport_match, NULL, &color_rule->rule,
				&attr)) {
			DRV_LOG(ERR, "Failed to create color%u rule.", i);
			goto err_exit;
		}
	}
	return 0;
err_exit:
	/* All the policy rules will be cleared. */
	/* Unwind colors [0..i] in reverse order; tmp_rules[i] may be partial. */
	do {
		color_rule = tmp_rules[i];
		if (color_rule) {
			if (color_rule->rule)
				mlx5_flow_os_destroy_flow(color_rule->rule);
			if (color_rule->matcher) {
				struct mlx5_flow_tbl_data_entry *tbl =
					container_of(color_rule->matcher->tbl,
						     typeof(*tbl), tbl);
				/* Drop the matcher reference taken at creation. */
				mlx5_list_unregister(tbl->matchers,
						&color_rule->matcher->entry);
			}
			TAILQ_REMOVE(&sub_policy->color_rules[i],
				     color_rule, next_port);
			mlx5_free(color_rule);
		}
	} while (i--);
	return -1;
}
16365
/**
 * Prepare the DV action list per color and create the policy rules
 * for one domain of a meter sub-policy.
 *
 * RED always uses the policy drop action. GREEN with a meter fate attaches
 * the next hierarchy meter first (on error that attachment is detached in
 * err_exit). The per-color fate (port-id / jump / drop / queue / RSS /
 * next meter) is appended after optional mark and modify-header actions.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Pointer to meter policy table.
 * @param[in] sub_policy
 *   Pointer to sub policy table.
 * @param[in] domain
 *   Meter domain index.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	struct mlx5_flow_dv_tag_resource *tag;
	struct mlx5_flow_dv_port_id_action_resource *port_action;
	struct mlx5_hrxq *hrxq;
	struct mlx5_flow_meter_info *next_fm = NULL;
	struct mlx5_flow_meter_policy *next_policy;
	struct mlx5_flow_meter_sub_policy *next_sub_policy;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct rte_flow_error error;
	uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	/* On TX (and FDB with a representor) the meter action must come first. */
	bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
	bool match_src_port = false;
	int i;

	/* If RSS or Queue, no previous actions / rules is created. */
	for (i = 0; i < RTE_COLORS; i++) {
		acts[i].actions_n = 0;
		if (i == RTE_COLOR_RED) {
			/* Only support drop on red. */
			acts[i].dv_actions[0] =
				mtr_policy->dr_drop_action[domain];
			acts[i].actions_n = 1;
			continue;
		}
		if (i == RTE_COLOR_GREEN &&
		    mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
			struct rte_flow_attr attr = {
				.transfer = transfer
			};

			/* Hierarchy: look up and attach the next-level meter. */
			next_fm = mlx5_flow_meter_find(priv,
					mtr_policy->act_cnt[i].next_mtr_id,
					NULL);
			if (!next_fm) {
				DRV_LOG(ERR,
					"Failed to get next hierarchy meter.");
				goto err_exit;
			}
			if (mlx5_flow_meter_attach(priv, next_fm,
						   &attr, &error)) {
				DRV_LOG(ERR, "%s", error.message);
				/* Not attached: don't detach in err_exit. */
				next_fm = NULL;
				goto err_exit;
			}
			/* Meter action must be the first for TX. */
			if (mtr_first) {
				acts[i].dv_actions[acts[i].actions_n] =
					next_fm->meter_action;
				acts[i].actions_n++;
			}
		}
		/* Optional MARK action. */
		if (mtr_policy->act_cnt[i].rix_mark) {
			tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
					mtr_policy->act_cnt[i].rix_mark);
			if (!tag) {
				DRV_LOG(ERR, "Failed to find "
				"mark action for policy.");
				goto err_exit;
			}
			acts[i].dv_actions[acts[i].actions_n] = tag->action;
			acts[i].actions_n++;
		}
		/* Optional modify-header action. */
		if (mtr_policy->act_cnt[i].modify_hdr) {
			acts[i].dv_actions[acts[i].actions_n] =
				mtr_policy->act_cnt[i].modify_hdr->action;
			acts[i].actions_n++;
		}
		/* Fate action comes last in the list. */
		if (mtr_policy->act_cnt[i].fate_action) {
			switch (mtr_policy->act_cnt[i].fate_action) {
			case MLX5_FLOW_FATE_PORT_ID:
				port_action = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
				mtr_policy->act_cnt[i].rix_port_id_action);
				if (!port_action) {
					DRV_LOG(ERR, "Failed to find "
						"port action for policy.");
					goto err_exit;
				}
				acts[i].dv_actions[acts[i].actions_n] =
					port_action->action;
				acts[i].actions_n++;
				mtr_policy->dev = dev;
				/* Port fate implies matching the source port. */
				match_src_port = true;
				break;
			case MLX5_FLOW_FATE_DROP:
			case MLX5_FLOW_FATE_JUMP:
				acts[i].dv_actions[acts[i].actions_n] =
				mtr_policy->act_cnt[i].dr_jump_action[domain];
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_SHARED_RSS:
			case MLX5_FLOW_FATE_QUEUE:
				hrxq = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					 sub_policy->rix_hrxq[i]);
				if (!hrxq) {
					DRV_LOG(ERR, "Failed to find "
						"queue action for policy.");
					goto err_exit;
				}
				acts[i].dv_actions[acts[i].actions_n] =
					hrxq->action;
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_MTR:
				/* next_fm was resolved in the GREEN branch above. */
				if (!next_fm) {
					DRV_LOG(ERR,
						"No next hierarchy meter.");
					goto err_exit;
				}
				if (!mtr_first) {
					acts[i].dv_actions[acts[i].actions_n] =
							next_fm->meter_action;
					acts[i].actions_n++;
				}
				/* Jump to the next policy's sub-policy table. */
				if (mtr_policy->act_cnt[i].next_sub_policy) {
					next_sub_policy =
					mtr_policy->act_cnt[i].next_sub_policy;
				} else {
					next_policy =
						mlx5_flow_meter_policy_find(dev,
						next_fm->policy_id, NULL);
					MLX5_ASSERT(next_policy);
					next_sub_policy =
					next_policy->sub_policys[domain][0];
				}
				tbl_data =
					container_of(next_sub_policy->tbl_rsc,
					struct mlx5_flow_tbl_data_entry, tbl);
				acts[i].dv_actions[acts[i].actions_n++] =
							tbl_data->jump.action;
				if (mtr_policy->act_cnt[i].modify_hdr)
					match_src_port = !!transfer;
				break;
			default:
				/*Queue action do nothing*/
				break;
			}
		}
	}
	if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
				egress, transfer, match_src_port, acts)) {
		DRV_LOG(ERR,
			"Failed to create policy rules per domain.");
		goto err_exit;
	}
	return 0;
err_exit:
	/* Undo the hierarchy meter attachment, if any. */
	if (next_fm)
		mlx5_flow_meter_detach(priv, next_fm);
	return -1;
}
16526
16527 /**
16528  * Create the policy rules.
16529  *
16530  * @param[in] dev
16531  *   Pointer to Ethernet device.
16532  * @param[in,out] mtr_policy
16533  *   Pointer to meter policy table.
16534  *
16535  * @return
16536  *   0 on success, -1 otherwise.
16537  */
16538 static int
16539 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16540                              struct mlx5_flow_meter_policy *mtr_policy)
16541 {
16542         int i;
16543         uint16_t sub_policy_num;
16544
16545         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16546                 sub_policy_num = (mtr_policy->sub_policy_num >>
16547                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16548                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16549                 if (!sub_policy_num)
16550                         continue;
16551                 /* Prepare actions list and create policy rules. */
16552                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16553                         mtr_policy->sub_policys[i][0], i)) {
16554                         DRV_LOG(ERR, "Failed to create policy action "
16555                                 "list per domain.");
16556                         return -1;
16557                 }
16558         }
16559         return 0;
16560 }
16561
/**
 * Create the default (non-termination) meter policy for one domain.
 *
 * GREEN and YELLOW both jump to the meter suffix table (the table is
 * fetched twice on purpose so its reference count matches the two
 * releases at destroy time). RED jumps to the shared drop table.
 * On any failure the partially built default policy is torn down via
 * __flow_dv_destroy_domain_def_policy().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] domain
 *   Meter domain index.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
	struct mlx5_flow_meter_def_policy *def_policy;
	struct mlx5_flow_tbl_resource *jump_tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	uint8_t egress, transfer;
	struct rte_flow_error error;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	int ret;

	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	def_policy = mtrmng->def_policy[domain];
	/* Idempotent: nothing to do if the domain already has a default policy. */
	if (!def_policy) {
		def_policy = mlx5_malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_flow_meter_def_policy),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!def_policy) {
			DRV_LOG(ERR, "Failed to alloc default policy table.");
			goto def_policy_error;
		}
		mtrmng->def_policy[domain] = def_policy;
		/* Create the meter suffix table with SUFFIX level. */
		jump_tbl = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
		if (!jump_tbl) {
			DRV_LOG(ERR,
				"Failed to create meter suffix table.");
			goto def_policy_error;
		}
		def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
		tbl_data = container_of(jump_tbl,
					struct mlx5_flow_tbl_data_entry, tbl);
		def_policy->dr_jump_action[RTE_COLOR_GREEN] =
						tbl_data->jump.action;
		acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
		acts[RTE_COLOR_GREEN].actions_n = 1;
		/*
		 * YELLOW has the same default policy as GREEN does.
		 * G & Y share the same table and action. The 2nd time of table
		 * resource getting is just to update the reference count for
		 * the releasing stage.
		 */
		jump_tbl = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
		if (!jump_tbl) {
			DRV_LOG(ERR,
				"Failed to get meter suffix table.");
			goto def_policy_error;
		}
		def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
		tbl_data = container_of(jump_tbl,
					struct mlx5_flow_tbl_data_entry, tbl);
		def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
						tbl_data->jump.action;
		acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
		acts[RTE_COLOR_YELLOW].actions_n = 1;
		/* Create jump action to the drop table. */
		if (!mtrmng->drop_tbl[domain]) {
			mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
				(dev, MLX5_FLOW_TABLE_LEVEL_METER,
				 egress, transfer, false, NULL, 0,
				 0, MLX5_MTR_TABLE_ID_DROP, &error);
			if (!mtrmng->drop_tbl[domain]) {
				DRV_LOG(ERR, "Failed to create meter "
					"drop table for default policy.");
				goto def_policy_error;
			}
		}
		/* all RED: unique Drop table for jump action. */
		tbl_data = container_of(mtrmng->drop_tbl[domain],
					struct mlx5_flow_tbl_data_entry, tbl);
		def_policy->dr_jump_action[RTE_COLOR_RED] =
						tbl_data->jump.action;
		acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
		acts[RTE_COLOR_RED].actions_n = 1;
		/* Create default policy rules. */
		ret = __flow_dv_create_domain_policy_rules(dev,
					&def_policy->sub_policy,
					egress, transfer, false, acts);
		if (ret) {
			DRV_LOG(ERR, "Failed to create default policy rules.");
			goto def_policy_error;
		}
	}
	return 0;
def_policy_error:
	/* Tear down whatever was created for this domain. */
	__flow_dv_destroy_domain_def_policy(dev,
					    (enum mlx5_meter_domain)domain);
	return -1;
}
16660
16661 /**
16662  * Create the default policy table set.
16663  *
16664  * @param[in] dev
16665  *   Pointer to Ethernet device.
16666  * @return
16667  *   0 on success, -1 otherwise.
16668  */
16669 static int
16670 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16671 {
16672         struct mlx5_priv *priv = dev->data->dev_private;
16673         int i;
16674
16675         /* Non-termination policy table. */
16676         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16677                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16678                         continue;
16679                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16680                         DRV_LOG(ERR, "Failed to create default policy");
16681                         /* Rollback the created default policies for others. */
16682                         flow_dv_destroy_def_policy(dev);
16683                         return -1;
16684                 }
16685         }
16686         return 0;
16687 }
16688
/**
 * Create the needed meter tables.
 * Lock free, (mutex should be acquired by caller).
 *
 * Per enabled domain this creates (once) the meter drop table, a default
 * catch-all matcher and drop rule, and — when the meter has a drop
 * counter — a matcher keyed on the meter-id register plus a per-meter
 * drop rule counting dropped packets. On failure, only the per-meter
 * drop rules are rolled back; shared tables/matchers are left for the
 * regular destroy path.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Meter information table.
 * @param[in] mtr_idx
 *   Meter index.
 * @param[in] domain_bitmap
 *   Domain bitmap.
 * @return
 *   0 on success, -1 otherwise.
 */
static int
flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_info *fm,
			uint32_t mtr_idx,
			uint8_t domain_bitmap)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
	struct rte_flow_error error;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	uint8_t egress, transfer;
	void *actions[METER_ACTIONS];
	int domain, ret, i;
	struct mlx5_flow_counter *cnt;
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_flow_dv_match_params matcher_para = {
		.size = sizeof(matcher_para.buf),
	};
	/* Register carrying the meter id; may share bits with the color. */
	int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
						     0, &error);
	uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
	uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
	struct mlx5_list_entry *entry;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
	};
	struct mlx5_flow_dv_matcher *drop_matcher;
	struct mlx5_flow_cb_ctx ctx = {
		.error = &error,
		.data = &matcher,
	};
	uint8_t misc_mask;

	if (!priv->mtr_en || mtr_id_reg_c < 0) {
		rte_errno = ENOTSUP;
		return -1;
	}
	for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
		/* Skip domains not requested or already fully set up
		 * when no per-meter drop counter is needed.
		 */
		if (!(domain_bitmap & (1 << domain)) ||
			(mtrmng->def_rule[domain] && !fm->drop_cnt))
			continue;
		egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
		transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
		/* Create the drop table with METER DROP level. */
		if (!mtrmng->drop_tbl[domain]) {
			mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
					MLX5_FLOW_TABLE_LEVEL_METER,
					egress, transfer, false, NULL, 0,
					0, MLX5_MTR_TABLE_ID_DROP, &error);
			if (!mtrmng->drop_tbl[domain]) {
				DRV_LOG(ERR, "Failed to create meter drop table.");
				goto policy_error;
			}
		}
		/* Create default matcher in drop table. */
		/* NOTE(review): trailing comma chains this with the next
		 * assignment (comma operator) — confirm intentional.
		 */
		matcher.tbl = mtrmng->drop_tbl[domain],
		tbl_data = container_of(mtrmng->drop_tbl[domain],
				struct mlx5_flow_tbl_data_entry, tbl);
		if (!mtrmng->def_matcher[domain]) {
			/* Zero mask: the default matcher matches any meter id. */
			flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
				       (enum modify_reg)mtr_id_reg_c,
				       0, 0);
			matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
			matcher.crc = rte_raw_cksum
					((const void *)matcher.mask.buf,
					matcher.mask.size);
			entry = mlx5_list_register(tbl_data->matchers, &ctx);
			if (!entry) {
				DRV_LOG(ERR, "Failed to register meter "
				"drop default matcher.");
				goto policy_error;
			}
			mtrmng->def_matcher[domain] = container_of(entry,
			struct mlx5_flow_dv_matcher, entry);
		}
		/* Create default rule in drop table. */
		if (!mtrmng->def_rule[domain]) {
			i = 0;
			actions[i++] = priv->sh->dr_drop_action;
			flow_dv_match_meta_reg(matcher_para.buf, value.buf,
				(enum modify_reg)mtr_id_reg_c, 0, 0);
			misc_mask = flow_dv_matcher_enable(value.buf);
			__flow_dv_adjust_buf_size(&value.size, misc_mask);
			ret = mlx5_flow_os_create_flow
				(mtrmng->def_matcher[domain]->matcher_object,
				(void *)&value, i, actions,
				&mtrmng->def_rule[domain]);
			if (ret) {
				DRV_LOG(ERR, "Failed to create meter "
				"default drop rule for drop table.");
				goto policy_error;
			}
		}
		/* The rest is only needed for per-meter drop counting. */
		if (!fm->drop_cnt)
			continue;
		MLX5_ASSERT(mtrmng->max_mtr_bits);
		if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
			/* Create matchers for Drop. */
			flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
					(enum modify_reg)mtr_id_reg_c, 0,
					(mtr_id_mask << mtr_id_offset));
			matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
			matcher.crc = rte_raw_cksum
					((const void *)matcher.mask.buf,
					matcher.mask.size);
			entry = mlx5_list_register(tbl_data->matchers, &ctx);
			if (!entry) {
				DRV_LOG(ERR,
				"Failed to register meter drop matcher.");
				goto policy_error;
			}
			mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
				container_of(entry, struct mlx5_flow_dv_matcher,
					     entry);
		}
		drop_matcher =
			mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
		/* Create drop rule, matching meter_id only. */
		flow_dv_match_meta_reg(matcher_para.buf, value.buf,
				(enum modify_reg)mtr_id_reg_c,
				(mtr_idx << mtr_id_offset), UINT32_MAX);
		i = 0;
		cnt = flow_dv_counter_get_by_idx(dev,
					fm->drop_cnt, NULL);
		actions[i++] = cnt->action;
		actions[i++] = priv->sh->dr_drop_action;
		misc_mask = flow_dv_matcher_enable(value.buf);
		__flow_dv_adjust_buf_size(&value.size, misc_mask);
		ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
					       (void *)&value, i, actions,
					       &fm->drop_rule[domain]);
		if (ret) {
			DRV_LOG(ERR, "Failed to create meter "
				"drop rule for drop table.");
				goto policy_error;
		}
	}
	return 0;
policy_error:
	/* Roll back only this meter's drop rules; shared resources stay. */
	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
		if (fm->drop_rule[i]) {
			claim_zero(mlx5_flow_os_destroy_flow
				(fm->drop_rule[i]));
			fm->drop_rule[i] = NULL;
		}
	}
	return -1;
}
16856
16857 static struct mlx5_flow_meter_sub_policy *
16858 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16859                 struct mlx5_flow_meter_policy *mtr_policy,
16860                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16861                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16862                 bool *is_reuse)
16863 {
16864         struct mlx5_priv *priv = dev->data->dev_private;
16865         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16866         uint32_t sub_policy_idx = 0;
16867         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16868         uint32_t i, j;
16869         struct mlx5_hrxq *hrxq;
16870         struct mlx5_flow_handle dh;
16871         struct mlx5_meter_policy_action_container *act_cnt;
16872         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16873         uint16_t sub_policy_num;
16874         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
16875
16876         MLX5_ASSERT(wks);
16877         rte_spinlock_lock(&mtr_policy->sl);
16878         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16879                 if (!rss_desc[i])
16880                         continue;
16881                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16882                 if (!hrxq_idx[i]) {
16883                         rte_spinlock_unlock(&mtr_policy->sl);
16884                         return NULL;
16885                 }
16886         }
16887         sub_policy_num = (mtr_policy->sub_policy_num >>
16888                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16889                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16890         for (j = 0; j < sub_policy_num; j++) {
16891                 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16892                         if (rss_desc[i] &&
16893                             hrxq_idx[i] !=
16894                             mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
16895                                 break;
16896                 }
16897                 if (i >= MLX5_MTR_RTE_COLORS) {
16898                         /*
16899                          * Found the sub policy table with
16900                          * the same queue per color.
16901                          */
16902                         rte_spinlock_unlock(&mtr_policy->sl);
16903                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16904                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16905                         *is_reuse = true;
16906                         return mtr_policy->sub_policys[domain][j];
16907                 }
16908         }
16909         /* Create sub policy. */
16910         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16911                 /* Reuse the first pre-allocated sub_policy. */
16912                 sub_policy = mtr_policy->sub_policys[domain][0];
16913                 sub_policy_idx = sub_policy->idx;
16914         } else {
16915                 sub_policy = mlx5_ipool_zmalloc
16916                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16917                                  &sub_policy_idx);
16918                 if (!sub_policy ||
16919                     sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16920                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16921                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16922                         goto rss_sub_policy_error;
16923                 }
16924                 sub_policy->idx = sub_policy_idx;
16925                 sub_policy->main_policy = mtr_policy;
16926         }
16927         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16928                 if (!rss_desc[i])
16929                         continue;
16930                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16931                 if (mtr_policy->is_hierarchy) {
16932                         act_cnt = &mtr_policy->act_cnt[i];
16933                         act_cnt->next_sub_policy = next_sub_policy;
16934                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16935                 } else {
16936                         /*
16937                          * Overwrite the last action from
16938                          * RSS action to Queue action.
16939                          */
16940                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16941                                               hrxq_idx[i]);
16942                         if (!hrxq) {
16943                                 DRV_LOG(ERR, "Failed to get policy hrxq");
16944                                 goto rss_sub_policy_error;
16945                         }
16946                         act_cnt = &mtr_policy->act_cnt[i];
16947                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16948                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16949                                 if (act_cnt->rix_mark)
16950                                         wks->mark = 1;
16951                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16952                                 dh.rix_hrxq = hrxq_idx[i];
16953                                 flow_drv_rxq_flags_set(dev, &dh);
16954                         }
16955                 }
16956         }
16957         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16958                                                sub_policy, domain)) {
16959                 DRV_LOG(ERR, "Failed to create policy "
16960                         "rules for ingress domain.");
16961                 goto rss_sub_policy_error;
16962         }
16963         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16964                 i = (mtr_policy->sub_policy_num >>
16965                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16966                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16967                 if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
16968                         DRV_LOG(ERR, "No free sub-policy slot.");
16969                         goto rss_sub_policy_error;
16970                 }
16971                 mtr_policy->sub_policys[domain][i] = sub_policy;
16972                 i++;
16973                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16974                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16975                 mtr_policy->sub_policy_num |=
16976                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16977                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16978         }
16979         rte_spinlock_unlock(&mtr_policy->sl);
16980         *is_reuse = false;
16981         return sub_policy;
16982 rss_sub_policy_error:
16983         if (sub_policy) {
16984                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16985                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16986                         i = (mtr_policy->sub_policy_num >>
16987                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16988                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16989                         mtr_policy->sub_policys[domain][i] = NULL;
16990                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16991                                         sub_policy->idx);
16992                 }
16993         }
16994         rte_spinlock_unlock(&mtr_policy->sl);
16995         return NULL;
16996 }
16997
16998 /**
16999  * Find the policy table for prefix table with RSS.
17000  *
17001  * @param[in] dev
17002  *   Pointer to Ethernet device.
17003  * @param[in] mtr_policy
17004  *   Pointer to meter policy table.
17005  * @param[in] rss_desc
17006  *   Pointer to rss_desc
17007  * @return
17008  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
17009  */
17010 static struct mlx5_flow_meter_sub_policy *
17011 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
17012                 struct mlx5_flow_meter_policy *mtr_policy,
17013                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
17014 {
17015         struct mlx5_priv *priv = dev->data->dev_private;
17016         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17017         struct mlx5_flow_meter_info *next_fm;
17018         struct mlx5_flow_meter_policy *next_policy;
17019         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
17020         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
17021         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
17022         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17023         bool reuse_sub_policy;
17024         uint32_t i = 0;
17025         uint32_t j = 0;
17026
17027         while (true) {
17028                 /* Iterate hierarchy to get all policies in this hierarchy. */
17029                 policies[i++] = mtr_policy;
17030                 if (!mtr_policy->is_hierarchy)
17031                         break;
17032                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
17033                         DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
17034                         return NULL;
17035                 }
17036                 next_fm = mlx5_flow_meter_find(priv,
17037                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17038                 if (!next_fm) {
17039                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
17040                         return NULL;
17041                 }
17042                 next_policy =
17043                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
17044                                                     NULL);
17045                 MLX5_ASSERT(next_policy);
17046                 mtr_policy = next_policy;
17047         }
17048         while (i) {
17049                 /**
17050                  * From last policy to the first one in hierarchy,
17051                  * create / get the sub policy for each of them.
17052                  */
17053                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
17054                                                         policies[--i],
17055                                                         rss_desc,
17056                                                         next_sub_policy,
17057                                                         &reuse_sub_policy);
17058                 if (!sub_policy) {
17059                         DRV_LOG(ERR, "Failed to get the sub policy.");
17060                         goto err_exit;
17061                 }
17062                 if (!reuse_sub_policy)
17063                         sub_policies[j++] = sub_policy;
17064                 next_sub_policy = sub_policy;
17065         }
17066         return sub_policy;
17067 err_exit:
17068         while (j) {
17069                 uint16_t sub_policy_num;
17070
17071                 sub_policy = sub_policies[--j];
17072                 mtr_policy = sub_policy->main_policy;
17073                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17074                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17075                         sub_policy_num = (mtr_policy->sub_policy_num >>
17076                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17077                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
17078                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
17079                                                                         NULL;
17080                         sub_policy_num--;
17081                         mtr_policy->sub_policy_num &=
17082                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17083                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i));
17084                         mtr_policy->sub_policy_num |=
17085                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17086                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i);
17087                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17088                                         sub_policy->idx);
17089                 }
17090         }
17091         return NULL;
17092 }
17093
/**
 * Create the sub policy tag rule for all meters in hierarchy.
 *
 * For each color of the transfer-domain sub-policy of @p fm's policy,
 * builds a color-match rule (optionally restricted to @p src_port) that
 * applies the modify-header action, the next meter's meter action and a
 * jump to the next policy table. Recurses into the next meter until a
 * non-hierarchical policy is reached.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Meter information table.
 * @param[in] src_port
 *   The src port this extra rule should use.
 * @param[in] item
 *   The src port match item.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
				struct mlx5_flow_meter_info *fm,
				int32_t src_port,
				const struct rte_flow_item *item,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_policy *mtr_policy;
	struct mlx5_flow_meter_sub_policy *sub_policy;
	struct mlx5_flow_meter_info *next_fm = NULL;
	struct mlx5_flow_meter_policy *next_policy;
	struct mlx5_flow_meter_sub_policy *next_sub_policy;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_sub_policy_color_rule *color_rule;
	struct mlx5_meter_policy_acts acts;
	uint32_t color_reg_c_idx;
	/* src_port == UINT16_MAX presumably means "no specific port" — the
	 * meter action then goes after the modify header; confirm caller
	 * convention.
	 */
	bool mtr_first = (src_port != UINT16_MAX) ? true : false;
	struct rte_flow_attr attr = {
		.group = MLX5_FLOW_TABLE_LEVEL_POLICY,
		.priority = 0,
		.ingress = 0,
		.egress = 0,
		.transfer = 1,
		.reserved = 0,
	};
	uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
	int i;

	mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
	MLX5_ASSERT(mtr_policy);
	/* Leaf policy: nothing to chain, recursion terminates here. */
	if (!mtr_policy->is_hierarchy)
		return 0;
	next_fm = mlx5_flow_meter_find(priv,
			mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
	if (!next_fm) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				"Failed to find next meter in hierarchy.");
	}
	/* Without a drop counter on the next meter no extra rule is needed. */
	if (!next_fm->drop_cnt)
		goto exit;
	color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
	sub_policy = mtr_policy->sub_policys[domain][0];
	for (i = 0; i < RTE_COLORS; i++) {
		bool rule_exist = false;
		struct mlx5_meter_policy_action_container *act_cnt;

		/* Only the green color is handled here. */
		if (i >= RTE_COLOR_YELLOW)
			break;
		/* Skip if a rule for this src_port already exists. */
		TAILQ_FOREACH(color_rule,
			      &sub_policy->color_rules[i], next_port)
			if (color_rule->src_port == src_port) {
				rule_exist = true;
				break;
			}
		if (rule_exist)
			continue;
		color_rule = mlx5_malloc(MLX5_MEM_ZERO,
				sizeof(struct mlx5_sub_policy_color_rule),
				0, SOCKET_ID_ANY);
		if (!color_rule)
			return rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "No memory to create tag color rule.");
		color_rule->src_port = src_port;
		attr.priority = i;
		next_policy = mlx5_flow_meter_policy_find(dev,
						next_fm->policy_id, NULL);
		MLX5_ASSERT(next_policy);
		next_sub_policy = next_policy->sub_policys[domain][0];
		tbl_data = container_of(next_sub_policy->tbl_rsc,
					struct mlx5_flow_tbl_data_entry, tbl);
		act_cnt = &mtr_policy->act_cnt[i];
		/* Action order depends on whether a src port is matched. */
		if (mtr_first) {
			acts.dv_actions[0] = next_fm->meter_action;
			acts.dv_actions[1] = act_cnt->modify_hdr->action;
		} else {
			acts.dv_actions[0] = act_cnt->modify_hdr->action;
			acts.dv_actions[1] = next_fm->meter_action;
		}
		/* Last action jumps to the next policy's table. */
		acts.dv_actions[2] = tbl_data->jump.action;
		acts.actions_n = 3;
		if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
			/* Attach failed: nothing to detach at err_exit. */
			next_fm = NULL;
			goto err_exit;
		}
		if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
				MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
				&attr, true, item,
				&color_rule->matcher, error)) {
			rte_flow_error_set(error, errno,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to create hierarchy meter matcher.");
			goto err_exit;
		}
		if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
					(enum rte_color)i,
					color_rule->matcher->matcher_object,
					acts.actions_n, acts.dv_actions,
					true, item,
					&color_rule->rule, &attr)) {
			rte_flow_error_set(error, errno,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to create hierarchy meter rule.");
			goto err_exit;
		}
		TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
				  color_rule, next_port);
	}
exit:
	/**
	 * Recursive call to iterate all meters in hierarchy and
	 * create needed rules.
	 * NOTE(review): no explicit cycle guard here — recursion depth relies
	 * on the hierarchy being validated as loop-free elsewhere; confirm.
	 */
	return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
						src_port, item, error);
err_exit:
	/* Roll back the partially built rule of the failing iteration. */
	if (color_rule) {
		if (color_rule->rule)
			mlx5_flow_os_destroy_flow(color_rule->rule);
		if (color_rule->matcher) {
			struct mlx5_flow_tbl_data_entry *tbl =
				container_of(color_rule->matcher->tbl,
						typeof(*tbl), tbl);
			mlx5_list_unregister(tbl->matchers,
						&color_rule->matcher->entry);
		}
		mlx5_free(color_rule);
	}
	if (next_fm)
		mlx5_flow_meter_detach(priv, next_fm);
	return -rte_errno;
}
17244
17245 /**
17246  * Destroy the sub policy table with RX queue.
17247  *
17248  * @param[in] dev
17249  *   Pointer to Ethernet device.
17250  * @param[in] mtr_policy
17251  *   Pointer to meter policy table.
17252  */
17253 static void
17254 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17255                                     struct mlx5_flow_meter_policy *mtr_policy)
17256 {
17257         struct mlx5_priv *priv = dev->data->dev_private;
17258         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17259         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17260         uint32_t i, j;
17261         uint16_t sub_policy_num, new_policy_num;
17262
17263         rte_spinlock_lock(&mtr_policy->sl);
17264         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17265                 switch (mtr_policy->act_cnt[i].fate_action) {
17266                 case MLX5_FLOW_FATE_SHARED_RSS:
17267                         sub_policy_num = (mtr_policy->sub_policy_num >>
17268                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17269                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17270                         new_policy_num = sub_policy_num;
17271                         for (j = 0; j < sub_policy_num; j++) {
17272                                 sub_policy =
17273                                         mtr_policy->sub_policys[domain][j];
17274                                 if (sub_policy) {
17275                                         __flow_dv_destroy_sub_policy_rules(dev,
17276                                                 sub_policy);
17277                                 if (sub_policy !=
17278                                         mtr_policy->sub_policys[domain][0]) {
17279                                         mtr_policy->sub_policys[domain][j] =
17280                                                                 NULL;
17281                                         mlx5_ipool_free
17282                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17283                                                 sub_policy->idx);
17284                                                 new_policy_num--;
17285                                         }
17286                                 }
17287                         }
17288                         if (new_policy_num != sub_policy_num) {
17289                                 mtr_policy->sub_policy_num &=
17290                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17291                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17292                                 mtr_policy->sub_policy_num |=
17293                                 (new_policy_num &
17294                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17295                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17296                         }
17297                         break;
17298                 case MLX5_FLOW_FATE_QUEUE:
17299                         sub_policy = mtr_policy->sub_policys[domain][0];
17300                         __flow_dv_destroy_sub_policy_rules(dev,
17301                                                            sub_policy);
17302                         break;
17303                 default:
17304                         /*Other actions without queue and do nothing*/
17305                         break;
17306                 }
17307         }
17308         rte_spinlock_unlock(&mtr_policy->sl);
17309 }
17310 /**
17311  * Check whether the DR drop action is supported on the root table or not.
17312  *
17313  * Create a simple flow with DR drop action on root table to validate
17314  * if DR drop action on root table is supported or not.
17315  *
17316  * @param[in] dev
17317  *   Pointer to rte_eth_dev structure.
17318  *
17319  * @return
17320  *   0 on success, a negative errno value otherwise and rte_errno is set.
17321  */
17322 int
17323 mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
17324 {
17325         struct mlx5_priv *priv = dev->data->dev_private;
17326         struct mlx5_dev_ctx_shared *sh = priv->sh;
17327         struct mlx5_flow_dv_match_params mask = {
17328                 .size = sizeof(mask.buf),
17329         };
17330         struct mlx5_flow_dv_match_params value = {
17331                 .size = sizeof(value.buf),
17332         };
17333         struct mlx5dv_flow_matcher_attr dv_attr = {
17334                 .type = IBV_FLOW_ATTR_NORMAL,
17335                 .priority = 0,
17336                 .match_criteria_enable = 0,
17337                 .match_mask = (void *)&mask,
17338         };
17339         struct mlx5_flow_tbl_resource *tbl = NULL;
17340         void *matcher = NULL;
17341         void *flow = NULL;
17342         int ret = -1;
17343
17344         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
17345                                         0, 0, 0, NULL);
17346         if (!tbl)
17347                 goto err;
17348         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17349         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17350         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17351                                                tbl->obj, &matcher);
17352         if (ret)
17353                 goto err;
17354         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17355         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17356                                        &sh->dr_drop_action, &flow);
17357 err:
17358         /*
17359          * If DR drop action is not supported on root table, flow create will
17360          * be failed with EOPNOTSUPP or EPROTONOSUPPORT.
17361          */
17362         if (!flow) {
17363                 if (matcher &&
17364                     (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
17365                         DRV_LOG(INFO, "DR drop action is not supported in root table.");
17366                 else
17367                         DRV_LOG(ERR, "Unexpected error in DR drop action support detection");
17368                 ret = -1;
17369         } else {
17370                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17371         }
17372         if (matcher)
17373                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17374         if (tbl)
17375                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17376         return ret;
17377 }
17378
17379 /**
17380  * Validate the batch counter support in root table.
17381  *
17382  * Create a simple flow with invalid counter and drop action on root table to
17383  * validate if batch counter with offset on root table is supported or not.
17384  *
17385  * @param[in] dev
17386  *   Pointer to rte_eth_dev structure.
17387  *
17388  * @return
17389  *   0 on success, a negative errno value otherwise and rte_errno is set.
17390  */
17391 int
17392 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17393 {
17394         struct mlx5_priv *priv = dev->data->dev_private;
17395         struct mlx5_dev_ctx_shared *sh = priv->sh;
17396         struct mlx5_flow_dv_match_params mask = {
17397                 .size = sizeof(mask.buf),
17398         };
17399         struct mlx5_flow_dv_match_params value = {
17400                 .size = sizeof(value.buf),
17401         };
17402         struct mlx5dv_flow_matcher_attr dv_attr = {
17403                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17404                 .priority = 0,
17405                 .match_criteria_enable = 0,
17406                 .match_mask = (void *)&mask,
17407         };
17408         void *actions[2] = { 0 };
17409         struct mlx5_flow_tbl_resource *tbl = NULL;
17410         struct mlx5_devx_obj *dcs = NULL;
17411         void *matcher = NULL;
17412         void *flow = NULL;
17413         int ret = -1;
17414
17415         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17416                                         0, 0, 0, NULL);
17417         if (!tbl)
17418                 goto err;
17419         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
17420         if (!dcs)
17421                 goto err;
17422         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17423                                                     &actions[0]);
17424         if (ret)
17425                 goto err;
17426         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17427         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17428         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17429                                                tbl->obj, &matcher);
17430         if (ret)
17431                 goto err;
17432         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17433         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17434                                        actions, &flow);
17435 err:
17436         /*
17437          * If batch counter with offset is not supported, the driver will not
17438          * validate the invalid offset value, flow create should success.
17439          * In this case, it means batch counter is not supported in root table.
17440          *
17441          * Otherwise, if flow create is failed, counter offset is supported.
17442          */
17443         if (flow) {
17444                 DRV_LOG(INFO, "Batch counter is not supported in root "
17445                               "table. Switch to fallback mode.");
17446                 rte_errno = ENOTSUP;
17447                 ret = -rte_errno;
17448                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17449         } else {
17450                 /* Check matcher to make sure validate fail at flow create. */
17451                 if (!matcher || (matcher && errno != EINVAL))
17452                         DRV_LOG(ERR, "Unexpected error in counter offset "
17453                                      "support detection");
17454                 ret = 0;
17455         }
17456         if (actions[0])
17457                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17458         if (matcher)
17459                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17460         if (tbl)
17461                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17462         if (dcs)
17463                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17464         return ret;
17465 }
17466
17467 /**
17468  * Query a devx counter.
17469  *
17470  * @param[in] dev
17471  *   Pointer to the Ethernet device structure.
17472  * @param[in] cnt
17473  *   Index to the flow counter.
17474  * @param[in] clear
17475  *   Set to clear the counter statistics.
17476  * @param[out] pkts
17477  *   The statistics value of packets.
17478  * @param[out] bytes
17479  *   The statistics value of bytes.
17480  *
17481  * @return
17482  *   0 on success, otherwise return -1.
17483  */
17484 static int
17485 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17486                       uint64_t *pkts, uint64_t *bytes)
17487 {
17488         struct mlx5_priv *priv = dev->data->dev_private;
17489         struct mlx5_flow_counter *cnt;
17490         uint64_t inn_pkts, inn_bytes;
17491         int ret;
17492
17493         if (!priv->sh->devx)
17494                 return -1;
17495
17496         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17497         if (ret)
17498                 return -1;
17499         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
17500         *pkts = inn_pkts - cnt->hits;
17501         *bytes = inn_bytes - cnt->bytes;
17502         if (clear) {
17503                 cnt->hits = inn_pkts;
17504                 cnt->bytes = inn_bytes;
17505         }
17506         return 0;
17507 }
17508
/**
 * Get aged-out flows.
 *
 * Collects aged contexts from both the ASO age-action list and the aged
 * counter list, then re-arms the age event trigger.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] context
 *   The address of an array of pointers to the aged-out flows contexts.
 * @param[in] nb_contexts
 *   The length of context array pointers.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   how many contexts get in success, otherwise negative errno value.
 *   if nb_contexts is 0, return the amount of all aged contexts.
 *   if nb_contexts is not 0 , return the amount of aged flows reported
 *   in the context array.
 */
static int
flow_dv_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	struct mlx5_aso_age_action *act;
	int nb_flows = 0;

	/* A non-zero count requires a destination array. */
	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "empty context");
	age_info = GET_PORT_AGE_INFO(priv);
	rte_spinlock_lock(&age_info->aged_sl);
	/* First report ASO age actions that have aged out. */
	LIST_FOREACH(act, &age_info->aged_aso, next) {
		nb_flows++;
		if (nb_contexts) {
			context[nb_flows - 1] =
						act->age_params.context;
			if (!(--nb_contexts))
				break;
		}
	}
	/* Then report counter-based aged flows. */
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
	/* Re-arm so new aging events will be signalled again. */
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}
17570
17571 /*
17572  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17573  */
17574 static uint32_t
17575 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17576 {
17577         return flow_dv_counter_alloc(dev, 0);
17578 }
17579
17580 /**
17581  * Validate indirect action.
17582  * Dispatcher for action type specific validation.
17583  *
17584  * @param[in] dev
17585  *   Pointer to the Ethernet device structure.
17586  * @param[in] conf
17587  *   Indirect action configuration.
17588  * @param[in] action
17589  *   The indirect action object to validate.
17590  * @param[out] error
17591  *   Perform verbose error reporting if not NULL. Initialized in case of
17592  *   error only.
17593  *
17594  * @return
17595  *   0 on success, otherwise negative errno value.
17596  */
17597 static int
17598 flow_dv_action_validate(struct rte_eth_dev *dev,
17599                         const struct rte_flow_indir_action_conf *conf,
17600                         const struct rte_flow_action *action,
17601                         struct rte_flow_error *err)
17602 {
17603         struct mlx5_priv *priv = dev->data->dev_private;
17604
17605         RTE_SET_USED(conf);
17606         switch (action->type) {
17607         case RTE_FLOW_ACTION_TYPE_RSS:
17608                 /*
17609                  * priv->obj_ops is set according to driver capabilities.
17610                  * When DevX capabilities are
17611                  * sufficient, it is set to devx_obj_ops.
17612                  * Otherwise, it is set to ibv_obj_ops.
17613                  * ibv_obj_ops doesn't support ind_table_modify operation.
17614                  * In this case the indirect RSS action can't be used.
17615                  */
17616                 if (priv->obj_ops.ind_table_modify == NULL)
17617                         return rte_flow_error_set
17618                                         (err, ENOTSUP,
17619                                          RTE_FLOW_ERROR_TYPE_ACTION,
17620                                          NULL,
17621                                          "Indirect RSS action not supported");
17622                 return mlx5_validate_action_rss(dev, action, err);
17623         case RTE_FLOW_ACTION_TYPE_AGE:
17624                 if (!priv->sh->aso_age_mng)
17625                         return rte_flow_error_set(err, ENOTSUP,
17626                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17627                                                 NULL,
17628                                                 "Indirect age action not supported");
17629                 return flow_dv_validate_action_age(0, action, dev, err);
17630         case RTE_FLOW_ACTION_TYPE_COUNT:
17631                 return flow_dv_validate_action_count(dev, true, 0, err);
17632         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17633                 if (!priv->sh->ct_aso_en)
17634                         return rte_flow_error_set(err, ENOTSUP,
17635                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17636                                         "ASO CT is not supported");
17637                 return mlx5_validate_action_ct(dev, action->conf, err);
17638         default:
17639                 return rte_flow_error_set(err, ENOTSUP,
17640                                           RTE_FLOW_ERROR_TYPE_ACTION,
17641                                           NULL,
17642                                           "action type not supported");
17643         }
17644 }
17645
17646 /*
17647  * Check if the RSS configurations for colors of a meter policy match
17648  * each other, except the queues.
17649  *
17650  * @param[in] r1
17651  *   Pointer to the first RSS flow action.
17652  * @param[in] r2
17653  *   Pointer to the second RSS flow action.
17654  *
17655  * @return
17656  *   0 on match, 1 on conflict.
17657  */
17658 static inline int
17659 flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
17660                                const struct rte_flow_action_rss *r2)
17661 {
17662         if (r1 == NULL || r2 == NULL)
17663                 return 0;
17664         if (!(r1->level <= 1 && r2->level <= 1) &&
17665             !(r1->level > 1 && r2->level > 1))
17666                 return 1;
17667         if (r1->types != r2->types &&
17668             !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
17669               (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
17670                 return 1;
17671         if (r1->key || r2->key) {
17672                 const void *key1 = r1->key ? r1->key : rss_hash_default_key;
17673                 const void *key2 = r2->key ? r2->key : rss_hash_default_key;
17674
17675                 if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
17676                         return 1;
17677         }
17678         return 0;
17679 }
17680
17681 /**
17682  * Validate the meter hierarchy chain for meter policy.
17683  *
17684  * @param[in] dev
17685  *   Pointer to the Ethernet device structure.
17686  * @param[in] meter_id
17687  *   Meter id.
17688  * @param[in] action_flags
17689  *   Holds the actions detected until now.
17690  * @param[out] is_rss
17691  *   Is RSS or not.
17692  * @param[out] hierarchy_domain
17693  *   The domain bitmap for hierarchy policy.
17694  * @param[out] error
17695  *   Perform verbose error reporting if not NULL. Initialized in case of
17696  *   error only.
17697  *
17698  * @return
17699  *   0 on success, otherwise negative errno value with error set.
17700  */
static int
flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
				  uint32_t meter_id,
				  uint64_t action_flags,
				  bool *is_rss,
				  uint8_t *hierarchy_domain,
				  struct rte_mtr_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_info *fm;
	struct mlx5_flow_meter_policy *policy;
	uint8_t cnt = 1; /* The first meter of the chain is already counted. */

	/* A meter action cannot be combined with another fate action. */
	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
		return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
					NULL,
					"Multiple fate actions not supported.");
	*hierarchy_domain = 0;
	/* Walk the GREEN-color meter chain down to the terminating policy. */
	while (true) {
		fm = mlx5_flow_meter_find(priv, meter_id, NULL);
		if (!fm)
			return -rte_mtr_error_set(error, EINVAL,
						RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
					"Meter not found in meter hierarchy.");
		/* Meters using the default policy cannot be chained. */
		if (fm->def_policy)
			return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
			"Non termination meter not supported in hierarchy.");
		policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
		MLX5_ASSERT(policy);
		/**
		 * Only inherit the supported domains of the first meter in
		 * hierarchy.
		 * One meter supports at least one domain.
		 */
		if (!*hierarchy_domain) {
			if (policy->transfer)
				*hierarchy_domain |=
						MLX5_MTR_DOMAIN_TRANSFER_BIT;
			if (policy->ingress)
				*hierarchy_domain |=
						MLX5_MTR_DOMAIN_INGRESS_BIT;
			if (policy->egress)
				*hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
		}
		if (!policy->is_hierarchy) {
			/* Terminating policy reached: report its RSS usage. */
			*is_rss = policy->is_rss;
			break;
		}
		meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
		/*
		 * Bound the walk so a mis-configured cyclic hierarchy
		 * cannot loop forever.
		 */
		if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
			return -rte_mtr_error_set(error, EINVAL,
					RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
					"Exceed max hierarchy meter number.");
	}
	return 0;
}
17760
17761 /**
17762  * Validate meter policy actions.
17763  * Dispatcher for action type specific validation.
17764  *
17765  * @param[in] dev
17766  *   Pointer to the Ethernet device structure.
17767  * @param[in] action
17768  *   The meter policy action object to validate.
17769  * @param[in] attr
17770  *   Attributes of flow to determine steering domain.
17771  * @param[out] error
17772  *   Perform verbose error reporting if not NULL. Initialized in case of
17773  *   error only.
17774  *
17775  * @return
17776  *   0 on success, otherwise negative errno value.
17777  */
17778 static int
17779 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17780                         const struct rte_flow_action *actions[RTE_COLORS],
17781                         struct rte_flow_attr *attr,
17782                         bool *is_rss,
17783                         uint8_t *domain_bitmap,
17784                         uint8_t *policy_mode,
17785                         struct rte_mtr_error *error)
17786 {
17787         struct mlx5_priv *priv = dev->data->dev_private;
17788         struct mlx5_dev_config *dev_conf = &priv->config;
17789         const struct rte_flow_action *act;
17790         uint64_t action_flags[RTE_COLORS] = {0};
17791         int actions_n;
17792         int i, ret;
17793         struct rte_flow_error flow_err;
17794         uint8_t domain_color[RTE_COLORS] = {0};
17795         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17796         uint8_t hierarchy_domain = 0;
17797         const struct rte_flow_action_meter *mtr;
17798         bool def_green = false;
17799         bool def_yellow = false;
17800         const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
17801
17802         if (!priv->config.dv_esw_en)
17803                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17804         *domain_bitmap = def_domain;
17805         /* Red color could only support DROP action. */
17806         if (!actions[RTE_COLOR_RED] ||
17807             actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17808                 return -rte_mtr_error_set(error, ENOTSUP,
17809                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17810                                 NULL, "Red color only supports drop action.");
17811         /*
17812          * Check default policy actions:
17813          * Green / Yellow: no action, Red: drop action
17814          * Either G or Y will trigger default policy actions to be created.
17815          */
17816         if (!actions[RTE_COLOR_GREEN] ||
17817             actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
17818                 def_green = true;
17819         if (!actions[RTE_COLOR_YELLOW] ||
17820             actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
17821                 def_yellow = true;
17822         if (def_green && def_yellow) {
17823                 *policy_mode = MLX5_MTR_POLICY_MODE_DEF;
17824                 return 0;
17825         } else if (!def_green && def_yellow) {
17826                 *policy_mode = MLX5_MTR_POLICY_MODE_OG;
17827         } else if (def_green && !def_yellow) {
17828                 *policy_mode = MLX5_MTR_POLICY_MODE_OY;
17829         } else {
17830                 *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
17831         }
17832         /* Set to empty string in case of NULL pointer access by user. */
17833         flow_err.message = "";
17834         for (i = 0; i < RTE_COLORS; i++) {
17835                 act = actions[i];
17836                 for (action_flags[i] = 0, actions_n = 0;
17837                      act && act->type != RTE_FLOW_ACTION_TYPE_END;
17838                      act++) {
17839                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17840                                 return -rte_mtr_error_set(error, ENOTSUP,
17841                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17842                                           NULL, "too many actions");
17843                         switch (act->type) {
17844                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17845                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
17846                                 if (!priv->config.dv_esw_en)
17847                                         return -rte_mtr_error_set(error,
17848                                         ENOTSUP,
17849                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17850                                         NULL, "PORT action validate check"
17851                                         " fail for ESW disable");
17852                                 ret = flow_dv_validate_action_port_id(dev,
17853                                                 action_flags[i],
17854                                                 act, attr, &flow_err);
17855                                 if (ret)
17856                                         return -rte_mtr_error_set(error,
17857                                         ENOTSUP,
17858                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17859                                         NULL, flow_err.message ?
17860                                         flow_err.message :
17861                                         "PORT action validate check fail");
17862                                 ++actions_n;
17863                                 action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
17864                                 break;
17865                         case RTE_FLOW_ACTION_TYPE_MARK:
17866                                 ret = flow_dv_validate_action_mark(dev, act,
17867                                                            action_flags[i],
17868                                                            attr, &flow_err);
17869                                 if (ret < 0)
17870                                         return -rte_mtr_error_set(error,
17871                                         ENOTSUP,
17872                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17873                                         NULL, flow_err.message ?
17874                                         flow_err.message :
17875                                         "Mark action validate check fail");
17876                                 if (dev_conf->dv_xmeta_en !=
17877                                         MLX5_XMETA_MODE_LEGACY)
17878                                         return -rte_mtr_error_set(error,
17879                                         ENOTSUP,
17880                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17881                                         NULL, "Extend MARK action is "
17882                                         "not supported. Please try use "
17883                                         "default policy for meter.");
17884                                 action_flags[i] |= MLX5_FLOW_ACTION_MARK;
17885                                 ++actions_n;
17886                                 break;
17887                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17888                                 ret = flow_dv_validate_action_set_tag(dev,
17889                                                         act, action_flags[i],
17890                                                         attr, &flow_err);
17891                                 if (ret)
17892                                         return -rte_mtr_error_set(error,
17893                                         ENOTSUP,
17894                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17895                                         NULL, flow_err.message ?
17896                                         flow_err.message :
17897                                         "Set tag action validate check fail");
17898                                 action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
17899                                 ++actions_n;
17900                                 break;
17901                         case RTE_FLOW_ACTION_TYPE_DROP:
17902                                 ret = mlx5_flow_validate_action_drop
17903                                         (action_flags[i], attr, &flow_err);
17904                                 if (ret < 0)
17905                                         return -rte_mtr_error_set(error,
17906                                         ENOTSUP,
17907                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17908                                         NULL, flow_err.message ?
17909                                         flow_err.message :
17910                                         "Drop action validate check fail");
17911                                 action_flags[i] |= MLX5_FLOW_ACTION_DROP;
17912                                 ++actions_n;
17913                                 break;
17914                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17915                                 /*
17916                                  * Check whether extensive
17917                                  * metadata feature is engaged.
17918                                  */
17919                                 if (dev_conf->dv_flow_en &&
17920                                     (dev_conf->dv_xmeta_en !=
17921                                      MLX5_XMETA_MODE_LEGACY) &&
17922                                     mlx5_flow_ext_mreg_supported(dev))
17923                                         return -rte_mtr_error_set(error,
17924                                           ENOTSUP,
17925                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17926                                           NULL, "Queue action with meta "
17927                                           "is not supported. Please try use "
17928                                           "default policy for meter.");
17929                                 ret = mlx5_flow_validate_action_queue(act,
17930                                                         action_flags[i], dev,
17931                                                         attr, &flow_err);
17932                                 if (ret < 0)
17933                                         return -rte_mtr_error_set(error,
17934                                           ENOTSUP,
17935                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17936                                           NULL, flow_err.message ?
17937                                           flow_err.message :
17938                                           "Queue action validate check fail");
17939                                 action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
17940                                 ++actions_n;
17941                                 break;
17942                         case RTE_FLOW_ACTION_TYPE_RSS:
17943                                 if (dev_conf->dv_flow_en &&
17944                                     (dev_conf->dv_xmeta_en !=
17945                                      MLX5_XMETA_MODE_LEGACY) &&
17946                                     mlx5_flow_ext_mreg_supported(dev))
17947                                         return -rte_mtr_error_set(error,
17948                                           ENOTSUP,
17949                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17950                                           NULL, "RSS action with meta "
17951                                           "is not supported. Please try use "
17952                                           "default policy for meter.");
17953                                 ret = mlx5_validate_action_rss(dev, act,
17954                                                                &flow_err);
17955                                 if (ret < 0)
17956                                         return -rte_mtr_error_set(error,
17957                                           ENOTSUP,
17958                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17959                                           NULL, flow_err.message ?
17960                                           flow_err.message :
17961                                           "RSS action validate check fail");
17962                                 action_flags[i] |= MLX5_FLOW_ACTION_RSS;
17963                                 ++actions_n;
17964                                 /* Either G or Y will set the RSS. */
17965                                 rss_color[i] = act->conf;
17966                                 break;
17967                         case RTE_FLOW_ACTION_TYPE_JUMP:
17968                                 ret = flow_dv_validate_action_jump(dev,
17969                                         NULL, act, action_flags[i],
17970                                         attr, true, &flow_err);
17971                                 if (ret)
17972                                         return -rte_mtr_error_set(error,
17973                                           ENOTSUP,
17974                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17975                                           NULL, flow_err.message ?
17976                                           flow_err.message :
17977                                           "Jump action validate check fail");
17978                                 ++actions_n;
17979                                 action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
17980                                 break;
17981                         /*
17982                          * Only the last meter in the hierarchy will support
17983                          * the YELLOW color steering. Then in the meter policy
17984                          * actions list, there should be no other meter inside.
17985                          */
17986                         case RTE_FLOW_ACTION_TYPE_METER:
17987                                 if (i != RTE_COLOR_GREEN)
17988                                         return -rte_mtr_error_set(error,
17989                                                 ENOTSUP,
17990                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17991                                                 NULL,
17992                                                 "Meter hierarchy only supports GREEN color.");
17993                                 if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
17994                                         return -rte_mtr_error_set(error,
17995                                                 ENOTSUP,
17996                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17997                                                 NULL,
17998                                                 "No yellow policy should be provided in meter hierarchy.");
17999                                 mtr = act->conf;
18000                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
18001                                                         mtr->mtr_id,
18002                                                         action_flags[i],
18003                                                         is_rss,
18004                                                         &hierarchy_domain,
18005                                                         error);
18006                                 if (ret)
18007                                         return ret;
18008                                 ++actions_n;
18009                                 action_flags[i] |=
18010                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
18011                                 break;
18012                         default:
18013                                 return -rte_mtr_error_set(error, ENOTSUP,
18014                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18015                                         NULL,
18016                                         "Doesn't support optional action");
18017                         }
18018                 }
18019                 if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
18020                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
18021                 } else if ((action_flags[i] &
18022                           (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
18023                           (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
18024                         /*
18025                          * Only support MLX5_XMETA_MODE_LEGACY
18026                          * so MARK action is only in ingress domain.
18027                          */
18028                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
18029                 } else {
18030                         domain_color[i] = def_domain;
18031                         if (action_flags[i] &&
18032                             !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18033                                 domain_color[i] &=
18034                                 ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
18035                 }
18036                 if (action_flags[i] &
18037                     MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
18038                         domain_color[i] &= hierarchy_domain;
18039                 /*
18040                  * Non-termination actions only support NIC Tx domain.
18041                  * The adjustion should be skipped when there is no
18042                  * action or only END is provided. The default domains
18043                  * bit-mask is set to find the MIN intersection.
18044                  * The action flags checking should also be skipped.
18045                  */
18046                 if ((def_green && i == RTE_COLOR_GREEN) ||
18047                     (def_yellow && i == RTE_COLOR_YELLOW))
18048                         continue;
18049                 /*
18050                  * Validate the drop action mutual exclusion
18051                  * with other actions. Drop action is mutually-exclusive
18052                  * with any other action, except for Count action.
18053                  */
18054                 if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
18055                     (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
18056                         return -rte_mtr_error_set(error, ENOTSUP,
18057                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18058                                 NULL, "Drop action is mutually-exclusive "
18059                                 "with any other action");
18060                 }
18061                 /* Eswitch has few restrictions on using items and actions */
18062                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
18063                         if (!mlx5_flow_ext_mreg_supported(dev) &&
18064                             action_flags[i] & MLX5_FLOW_ACTION_MARK)
18065                                 return -rte_mtr_error_set(error, ENOTSUP,
18066                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18067                                         NULL, "unsupported action MARK");
18068                         if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
18069                                 return -rte_mtr_error_set(error, ENOTSUP,
18070                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18071                                         NULL, "unsupported action QUEUE");
18072                         if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
18073                                 return -rte_mtr_error_set(error, ENOTSUP,
18074                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18075                                         NULL, "unsupported action RSS");
18076                         if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18077                                 return -rte_mtr_error_set(error, ENOTSUP,
18078                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18079                                         NULL, "no fate action is found");
18080                 } else {
18081                         if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
18082                             (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
18083                                 if ((domain_color[i] &
18084                                      MLX5_MTR_DOMAIN_EGRESS_BIT))
18085                                         domain_color[i] =
18086                                                 MLX5_MTR_DOMAIN_EGRESS_BIT;
18087                                 else
18088                                         return -rte_mtr_error_set(error,
18089                                                 ENOTSUP,
18090                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18091                                                 NULL,
18092                                                 "no fate action is found");
18093                         }
18094                 }
18095         }
18096         /* If both colors have RSS, the attributes should be the same. */
18097         if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
18098                                            rss_color[RTE_COLOR_YELLOW]))
18099                 return -rte_mtr_error_set(error, EINVAL,
18100                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18101                                           NULL, "policy RSS attr conflict");
18102         if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
18103                 *is_rss = true;
18104         /* "domain_color[C]" is non-zero for each color, default is ALL. */
18105         if (!def_green && !def_yellow &&
18106             domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
18107             !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
18108             !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
18109                 return -rte_mtr_error_set(error, EINVAL,
18110                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18111                                           NULL, "policy domains conflict");
18112         /*
18113          * At least one color policy is listed in the actions, the domains
18114          * to be supported should be the intersection.
18115          */
18116         *domain_bitmap = domain_color[RTE_COLOR_GREEN] &
18117                          domain_color[RTE_COLOR_YELLOW];
18118         return 0;
18119 }
18120
18121 static int
18122 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
18123 {
18124         struct mlx5_priv *priv = dev->data->dev_private;
18125         int ret = 0;
18126
18127         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
18128                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
18129                                                 flags);
18130                 if (ret != 0)
18131                         return ret;
18132         }
18133         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
18134                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
18135                 if (ret != 0)
18136                         return ret;
18137         }
18138         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
18139                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
18140                 if (ret != 0)
18141                         return ret;
18142         }
18143         return 0;
18144 }
18145
18146 /**
18147  * Discover the number of available flow priorities
18148  * by trying to create a flow with the highest priority value
18149  * for each possible number.
18150  *
18151  * @param[in] dev
18152  *   Ethernet device.
18153  * @param[in] vprio
18154  *   List of possible number of available priorities.
18155  * @param[in] vprio_n
18156  *   Size of @p vprio array.
18157  * @return
18158  *   On success, number of available flow priorities.
18159  *   On failure, a negative errno-style code and rte_errno is set.
18160  */
static int
flow_dv_discover_priorities(struct rte_eth_dev *dev,
			    const uint16_t *vprio, int vprio_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
	/* Catch-all pattern: spec and mask both point at the zeroed item. */
	struct rte_flow_item_eth eth;
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &eth,
		.mask = &eth,
	};
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
	};
	union mlx5_flow_tbl_key tbl_key;
	struct mlx5_flow flow;
	void *action;
	struct rte_flow_error error;
	uint8_t misc_mask;
	/* -ENOTSUP is reported if no candidate in @p vprio is accepted. */
	int i, err, ret = -ENOTSUP;

	/*
	 * Prepare a flow with a catch-all pattern and a drop action.
	 * Use drop queue, because shared drop action may be unavailable.
	 */
	action = priv->drop_queue.hrxq->action;
	if (action == NULL) {
		DRV_LOG(ERR, "Priority discovery requires a drop action");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	memset(&flow, 0, sizeof(flow));
	flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
	if (flow.handle == NULL) {
		DRV_LOG(ERR, "Cannot create flow handle");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	flow.ingress = true;
	flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
	flow.dv.actions[0] = action;
	flow.dv.actions_n = 1;
	memset(&eth, 0, sizeof(eth));
	/* Fill the matcher mask and flow value from the catch-all item. */
	flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
				   &item, /* inner */ false, /* group */ 0);
	matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
	/*
	 * Probe each candidate count in order; each iteration registers a
	 * matcher, tries to apply the flow to HW, then releases everything
	 * before deciding whether to record the candidate or stop.
	 */
	for (i = 0; i < vprio_n; i++) {
		/* Configure the next proposed maximum priority. */
		matcher.priority = vprio[i] - 1;
		memset(&tbl_key, 0, sizeof(tbl_key));
		err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
					       /* tunnel */ NULL,
					       /* group */ 0,
					       &error);
		if (err != 0) {
			/* This action is pure SW and must always succeed. */
			DRV_LOG(ERR, "Cannot register matcher");
			ret = -rte_errno;
			break;
		}
		/* Try to apply the flow to HW. */
		misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
		__flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
		err = mlx5_flow_os_create_flow
				(flow.handle->dvh.matcher->matcher_object,
				 (void *)&flow.dv.value, flow.dv.actions_n,
				 flow.dv.actions, &flow.handle->drv_flow);
		if (err == 0) {
			/* HW accepted it - the probe flow is no longer needed. */
			claim_zero(mlx5_flow_os_destroy_flow
						(flow.handle->drv_flow));
			flow.handle->drv_flow = NULL;
		}
		/* Release the matcher regardless of the HW outcome. */
		claim_zero(flow_dv_matcher_release(dev, flow.handle));
		if (err != 0)
			break;
		/* Candidate accepted; remember it and try the next one. */
		ret = vprio[i];
	}
	mlx5_ipool_free(pool, flow.handle_idx);
	/* Set rte_errno if no expected priority value matched. */
	if (ret < 0)
		rte_errno = -ret;
	return ret;
}
18247
/* DV (Direct Verbs/Rules) driver callbacks registered with the generic
 * mlx5 flow layer.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	/* Flow rule lifecycle. */
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	/* Meter tables, meters and meter policies. */
	.create_mtr_tbls = flow_dv_create_mtr_tbls,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
	.destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
	.create_meter = flow_dv_mtr_alloc,
	.free_meter = flow_dv_aso_mtr_release_to_pool,
	.validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
	.create_mtr_acts = flow_dv_create_mtr_policy_acts,
	.destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
	.create_policy_rules = flow_dv_create_policy_rules,
	.destroy_policy_rules = flow_dv_destroy_policy_rules,
	.create_def_policy = flow_dv_create_def_policy,
	.destroy_def_policy = flow_dv_destroy_def_policy,
	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
	.meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
	.destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
	/* Counters and aging. */
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_dv_get_aged_flows,
	/* Indirect (shared) actions. */
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
	/* Miscellaneous. */
	.sync_domain = flow_dv_sync_domain,
	.discover_priorities = flow_dv_discover_priorities,
	.item_create = flow_dv_item_create,
	.item_release = flow_dv_item_release,
};
18285
18286 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
18287