/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
#include <rte_tailq.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

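/*
 * Flow attributes deduced from the flow items. Used to pick the proper
 * L3/L4 field tables when converting modify-header actions.
 */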
union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);

static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

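/**
 * Get the E-Switch Manager vport id.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   The E-Switch Manager vport id: 0xfffe for BlueField devices,
 *   0 otherwise or when no PCI device is attached.
 */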
static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (priv->pci_dev == NULL)
                return 0;
        switch (priv->pci_dev->id.device_id) {
        case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF:
        case PCI_DEVICE_ID_MELLANOX_CONNECTX7BF:
                return (int16_t)0xfffe;
        default:
                return 0;
        }
}

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() disallows multiple L3/L4 layers except in tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. Use the
         * layer flags from the prefix flow, as the suffix flow may not
         * carry the user-defined items once the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/*
 * Convert rte_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_color value.
 *
 * @return
 *   mlx5 color.
 */
static inline int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

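/*
 * Field description tables for modify-header actions. Offsets are in
 * bytes unless stated otherwise; each table ends with a zero-size entry.
 */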
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

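/**
 * Detect IP-in-IP and IPv6 encapsulation from the next-protocol value
 * and update the item flags and tunnel indicator accordingly.
 *
 * @param[in] item
 *   Pointer to the IPv4/IPv6 item (used for assertions only).
 * @param[in] next_protocol
 *   Next protocol value taken from the IP header.
 * @param[in,out] item_flags
 *   Item flags to update.
 * @param[out] tunnel
 *   Set to 1 when a tunnel layer is detected.
 */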
291 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
292                           uint8_t next_protocol, uint64_t *item_flags,
293                           int *tunnel)
294 {
295         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
296                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
297         if (next_protocol == IPPROTO_IPIP) {
298                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
299                 *tunnel = 1;
300         }
301         if (next_protocol == IPPROTO_IPV6) {
302                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
303                 *tunnel = 1;
304         }
305 }
306
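/**
 * Return the hash list pointed to by @p phl, creating it on first use.
 * The new list is published with an atomic compare-and-swap; if another
 * thread wins the race, the local copy is destroyed and the published
 * list is returned instead.
 */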
static inline struct mlx5_hlist *
flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
                     const char *name, uint32_t size, bool direct_key,
                     bool lcores_share, void *ctx,
                     mlx5_list_create_cb cb_create,
                     mlx5_list_match_cb cb_match,
                     mlx5_list_remove_cb cb_remove,
                     mlx5_list_clone_cb cb_clone,
                     mlx5_list_clone_free_cb cb_clone_free)
{
        struct mlx5_hlist *hl;
        struct mlx5_hlist *expected = NULL;
        char s[MLX5_NAME_SIZE];

        hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        if (likely(hl))
                return hl;
        snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
        hl = mlx5_hlist_create(s, size, direct_key, lcores_share,
                        ctx, cb_create, cb_match, cb_remove, cb_clone,
                        cb_clone_free);
        if (!hl) {
                DRV_LOG(ERR, "%s hash creation failed", name);
                rte_errno = ENOMEM;
                return NULL;
        }
        if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
                                         __ATOMIC_SEQ_CST,
                                         __ATOMIC_SEQ_CST)) {
                mlx5_hlist_destroy(hl);
                hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
        }
        return hl;
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
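 *   For example, data = {0x12, 0x34, 0x56} with size 3 yields 0x123456.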
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask; the data bit offset and width are
 * deduced from the item mask.
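 * For example, a 32-bit mask of 0x00fff000 describes a field that is
 * 12 bits wide and starts at bit offset 12 from the least significant
 * bit.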
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t carry_b = 0;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                uint32_t size_b;
                uint32_t off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask) + carry_b;
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        carry_b = 0;
                        if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
                            dcopy->size != 0) {
                                actions[i].length =
                                        dcopy->size * CHAR_BIT - dcopy->offset;
                                carry_b = actions[i].length;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in a source field to fill a
                         * destination field. Switch to the next source.
                         */
                        if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
                            (size_b == field->size * CHAR_BIT - off_b)) {
                                actions[i].length =
                                        field->size * CHAR_BIT - off_b;
                                dcopy->offset += actions[i].length;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using the increment
                 * operation, add UINT32_MAX X times to Y; each addition
                 * of UINT32_MAX decrements Y by 1.
                 */
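                /* E.g. for X = 2, adding 2 * UINT32_MAX is congruent to
                 * subtracting 2 modulo 2^32. */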
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using the increment
                 * operation, add UINT32_MAX X times to Y; each addition
                 * of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

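/* Map metadata registers to the matching modify-header field ids. */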
static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        mask = rte_cpu_to_be_32(reg_c0 >> reg_dst.offset);
                } else {
                        reg_dst.offset = 0;
                        mask = rte_cpu_to_be_32(reg_c0);
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * only in extensive metadata mode and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects the parameters in memory in big-endian format. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1371         /*
1372          * Even though the IPv6 DSCP field is not byte aligned in the
1373          * header, rdma-core only accepts a byte-aligned DSCP value placed
1374          * in bits 0 to 5, for compatibility with IPv4. Hence there is no
1375          * need to shift the bits here; the byte-aligned value is passed.
1376          */
1377         ipv6.hdr.vtc_flow = conf->dscp;
1378         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1379         item.spec = &ipv6;
1380         item.mask = &ipv6_mask;
1381         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1382                                              MLX5_MODIFICATION_TYPE_SET, error);
1383 }
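
/*
 * Illustration, not part of the driver: in the IPv6 vtc_flow word
 * (version:4 | traffic class:8 | flow label:20) DSCP is the upper six
 * bits of the traffic class, i.e. bits 27..22, so
 * RTE_IPV6_HDR_DSCP_MASK == 0x0fc00000 and shifting it right by 22
 * again yields the right-aligned 6-bit mask 0x3f, consistent with the
 * byte-aligned value rdma-core expects.
 */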
1384
1385 static int
1386 mlx5_flow_item_field_width(struct rte_eth_dev *dev,
1387                            enum rte_flow_field_id field, int inherit,
1388                            const struct rte_flow_attr *attr,
1389                            struct rte_flow_error *error)
1390 {
1391         struct mlx5_priv *priv = dev->data->dev_private;
1392
1393         switch (field) {
1394         case RTE_FLOW_FIELD_START:
1395                 return 32;
1396         case RTE_FLOW_FIELD_MAC_DST:
1397         case RTE_FLOW_FIELD_MAC_SRC:
1398                 return 48;
1399         case RTE_FLOW_FIELD_VLAN_TYPE:
1400                 return 16;
1401         case RTE_FLOW_FIELD_VLAN_ID:
1402                 return 12;
1403         case RTE_FLOW_FIELD_MAC_TYPE:
1404                 return 16;
1405         case RTE_FLOW_FIELD_IPV4_DSCP:
1406                 return 6;
1407         case RTE_FLOW_FIELD_IPV4_TTL:
1408                 return 8;
1409         case RTE_FLOW_FIELD_IPV4_SRC:
1410         case RTE_FLOW_FIELD_IPV4_DST:
1411                 return 32;
1412         case RTE_FLOW_FIELD_IPV6_DSCP:
1413                 return 6;
1414         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1415                 return 8;
1416         case RTE_FLOW_FIELD_IPV6_SRC:
1417         case RTE_FLOW_FIELD_IPV6_DST:
1418                 return 128;
1419         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1420         case RTE_FLOW_FIELD_TCP_PORT_DST:
1421                 return 16;
1422         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1423         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1424                 return 32;
1425         case RTE_FLOW_FIELD_TCP_FLAGS:
1426                 return 9;
1427         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1428         case RTE_FLOW_FIELD_UDP_PORT_DST:
1429                 return 16;
1430         case RTE_FLOW_FIELD_VXLAN_VNI:
1431         case RTE_FLOW_FIELD_GENEVE_VNI:
1432                 return 24;
1433         case RTE_FLOW_FIELD_GTP_TEID:
1434         case RTE_FLOW_FIELD_TAG:
1435                 return 32;
1436         case RTE_FLOW_FIELD_MARK:
1437                 return __builtin_popcount(priv->sh->dv_mark_mask);
1438         case RTE_FLOW_FIELD_META:
1439                 return (flow_dv_get_metadata_reg(dev, attr, error) == REG_C_0) ?
1440                         __builtin_popcount(priv->sh->dv_meta_mask) : 32;
1441         case RTE_FLOW_FIELD_POINTER:
1442         case RTE_FLOW_FIELD_VALUE:
1443                 return inherit < 0 ? 0 : inherit;
1444         default:
1445                 MLX5_ASSERT(false);
1446         }
1447         return 0;
1448 }
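
/*
 * Illustration, not part of the driver: the widths returned above cap
 * the "width" of a MODIFY_FIELD action. For instance a VLAN ID copy is
 * limited to 12 bits, a MARK write to popcount(dv_mark_mask) bits, and
 * an IPv6 address may use up to 128 bits, which the conversion routine
 * below consumes in 32-bit chunks.
 */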
1449
1450 static void
1451 mlx5_flow_field_id_to_modify_info
1452                 (const struct rte_flow_action_modify_data *data,
1453                  struct field_modify_info *info, uint32_t *mask,
1454                  uint32_t width, uint32_t *shift, struct rte_eth_dev *dev,
1455                  const struct rte_flow_attr *attr, struct rte_flow_error *error)
1456 {
1457         struct mlx5_priv *priv = dev->data->dev_private;
1458         uint32_t idx = 0;
1459         uint32_t off = 0;
1460
1461         switch (data->field) {
1462         case RTE_FLOW_FIELD_START:
1463                 /* not supported yet */
1464                 MLX5_ASSERT(false);
1465                 break;
1466         case RTE_FLOW_FIELD_MAC_DST:
1467                 off = data->offset > 16 ? data->offset - 16 : 0;
1468                 if (mask) {
1469                         if (data->offset < 16) {
1470                                 info[idx] = (struct field_modify_info){2, 4,
1471                                                 MLX5_MODI_OUT_DMAC_15_0};
1472                                 if (width < 16) {
1473                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1474                                                                  (16 - width));
1475                                         width = 0;
1476                                 } else {
1477                                         mask[idx] = RTE_BE16(0xffff);
1478                                         width -= 16;
1479                                 }
1480                                 if (!width)
1481                                         break;
1482                                 ++idx;
1483                         }
1484                         info[idx] = (struct field_modify_info){4, 0,
1485                                                 MLX5_MODI_OUT_DMAC_47_16};
1486                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1487                                                       (32 - width)) << off);
1488                 } else {
1489                         if (data->offset < 16)
1490                                 info[idx++] = (struct field_modify_info){2, 4,
1491                                                 MLX5_MODI_OUT_DMAC_15_0};
1492                         info[idx] = (struct field_modify_info){4, 0,
1493                                                 MLX5_MODI_OUT_DMAC_47_16};
1494                 }
1495                 break;
1496         case RTE_FLOW_FIELD_MAC_SRC:
1497                 off = data->offset > 16 ? data->offset - 16 : 0;
1498                 if (mask) {
1499                         if (data->offset < 16) {
1500                                 info[idx] = (struct field_modify_info){2, 4,
1501                                                 MLX5_MODI_OUT_SMAC_15_0};
1502                                 if (width < 16) {
1503                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1504                                                                  (16 - width));
1505                                         width = 0;
1506                                 } else {
1507                                         mask[idx] = RTE_BE16(0xffff);
1508                                         width -= 16;
1509                                 }
1510                                 if (!width)
1511                                         break;
1512                                 ++idx;
1513                         }
1514                         info[idx] = (struct field_modify_info){4, 0,
1515                                                 MLX5_MODI_OUT_SMAC_47_16};
1516                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1517                                                       (32 - width)) << off);
1518                 } else {
1519                         if (data->offset < 16)
1520                                 info[idx++] = (struct field_modify_info){2, 4,
1521                                                 MLX5_MODI_OUT_SMAC_15_0};
1522                         info[idx] = (struct field_modify_info){4, 0,
1523                                                 MLX5_MODI_OUT_SMAC_47_16};
1524                 }
1525                 break;
1526         case RTE_FLOW_FIELD_VLAN_TYPE:
1527                 /* not supported yet */
1528                 break;
1529         case RTE_FLOW_FIELD_VLAN_ID:
1530                 info[idx] = (struct field_modify_info){2, 0,
1531                                         MLX5_MODI_OUT_FIRST_VID};
1532                 if (mask)
1533                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1534                 break;
1535         case RTE_FLOW_FIELD_MAC_TYPE:
1536                 info[idx] = (struct field_modify_info){2, 0,
1537                                         MLX5_MODI_OUT_ETHERTYPE};
1538                 if (mask)
1539                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1540                 break;
1541         case RTE_FLOW_FIELD_IPV4_DSCP:
1542                 info[idx] = (struct field_modify_info){1, 0,
1543                                         MLX5_MODI_OUT_IP_DSCP};
1544                 if (mask)
1545                         mask[idx] = 0x3f >> (6 - width);
1546                 break;
1547         case RTE_FLOW_FIELD_IPV4_TTL:
1548                 info[idx] = (struct field_modify_info){1, 0,
1549                                         MLX5_MODI_OUT_IPV4_TTL};
1550                 if (mask)
1551                         mask[idx] = 0xff >> (8 - width);
1552                 break;
1553         case RTE_FLOW_FIELD_IPV4_SRC:
1554                 info[idx] = (struct field_modify_info){4, 0,
1555                                         MLX5_MODI_OUT_SIPV4};
1556                 if (mask)
1557                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1558                                                      (32 - width));
1559                 break;
1560         case RTE_FLOW_FIELD_IPV4_DST:
1561                 info[idx] = (struct field_modify_info){4, 0,
1562                                         MLX5_MODI_OUT_DIPV4};
1563                 if (mask)
1564                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1565                                                      (32 - width));
1566                 break;
1567         case RTE_FLOW_FIELD_IPV6_DSCP:
1568                 info[idx] = (struct field_modify_info){1, 0,
1569                                         MLX5_MODI_OUT_IP_DSCP};
1570                 if (mask)
1571                         mask[idx] = 0x3f >> (6 - width);
1572                 break;
1573         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1574                 info[idx] = (struct field_modify_info){1, 0,
1575                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1576                 if (mask)
1577                         mask[idx] = 0xff >> (8 - width);
1578                 break;
1579         case RTE_FLOW_FIELD_IPV6_SRC:
1580                 if (mask) {
1581                         if (data->offset < 32) {
1582                                 info[idx] = (struct field_modify_info){4, 12,
1583                                                 MLX5_MODI_OUT_SIPV6_31_0};
1584                                 if (width < 32) {
1585                                         mask[idx] =
1586                                                 rte_cpu_to_be_32(0xffffffff >>
1587                                                                  (32 - width));
1588                                         width = 0;
1589                                 } else {
1590                                         mask[idx] = RTE_BE32(0xffffffff);
1591                                         width -= 32;
1592                                 }
1593                                 if (!width)
1594                                         break;
1595                                 ++idx;
1596                         }
1597                         if (data->offset < 64) {
1598                                 info[idx] = (struct field_modify_info){4, 8,
1599                                                 MLX5_MODI_OUT_SIPV6_63_32};
1600                                 if (width < 32) {
1601                                         mask[idx] =
1602                                                 rte_cpu_to_be_32(0xffffffff >>
1603                                                                  (32 - width));
1604                                         width = 0;
1605                                 } else {
1606                                         mask[idx] = RTE_BE32(0xffffffff);
1607                                         width -= 32;
1608                                 }
1609                                 if (!width)
1610                                         break;
1611                                 ++idx;
1612                         }
1613                         if (data->offset < 96) {
1614                                 info[idx] = (struct field_modify_info){4, 4,
1615                                                 MLX5_MODI_OUT_SIPV6_95_64};
1616                                 if (width < 32) {
1617                                         mask[idx] =
1618                                                 rte_cpu_to_be_32(0xffffffff >>
1619                                                                  (32 - width));
1620                                         width = 0;
1621                                 } else {
1622                                         mask[idx] = RTE_BE32(0xffffffff);
1623                                         width -= 32;
1624                                 }
1625                                 if (!width)
1626                                         break;
1627                                 ++idx;
1628                         }
1629                         info[idx] = (struct field_modify_info){4, 0,
1630                                                 MLX5_MODI_OUT_SIPV6_127_96};
1631                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1632                                                      (32 - width));
1633                 } else {
1634                         if (data->offset < 32)
1635                                 info[idx++] = (struct field_modify_info){4, 12,
1636                                                 MLX5_MODI_OUT_SIPV6_31_0};
1637                         if (data->offset < 64)
1638                                 info[idx++] = (struct field_modify_info){4, 8,
1639                                                 MLX5_MODI_OUT_SIPV6_63_32};
1640                         if (data->offset < 96)
1641                                 info[idx++] = (struct field_modify_info){4, 4,
1642                                                 MLX5_MODI_OUT_SIPV6_95_64};
1643                         if (data->offset < 128)
1644                                 info[idx++] = (struct field_modify_info){4, 0,
1645                                                 MLX5_MODI_OUT_SIPV6_127_96};
1646                 }
1647                 break;
1648         case RTE_FLOW_FIELD_IPV6_DST:
1649                 if (mask) {
1650                         if (data->offset < 32) {
1651                                 info[idx] = (struct field_modify_info){4, 12,
1652                                                 MLX5_MODI_OUT_DIPV6_31_0};
1653                                 if (width < 32) {
1654                                         mask[idx] =
1655                                                 rte_cpu_to_be_32(0xffffffff >>
1656                                                                  (32 - width));
1657                                         width = 0;
1658                                 } else {
1659                                         mask[idx] = RTE_BE32(0xffffffff);
1660                                         width -= 32;
1661                                 }
1662                                 if (!width)
1663                                         break;
1664                                 ++idx;
1665                         }
1666                         if (data->offset < 64) {
1667                                 info[idx] = (struct field_modify_info){4, 8,
1668                                                 MLX5_MODI_OUT_DIPV6_63_32};
1669                                 if (width < 32) {
1670                                         mask[idx] =
1671                                                 rte_cpu_to_be_32(0xffffffff >>
1672                                                                  (32 - width));
1673                                         width = 0;
1674                                 } else {
1675                                         mask[idx] = RTE_BE32(0xffffffff);
1676                                         width -= 32;
1677                                 }
1678                                 if (!width)
1679                                         break;
1680                                 ++idx;
1681                         }
1682                         if (data->offset < 96) {
1683                                 info[idx] = (struct field_modify_info){4, 4,
1684                                                 MLX5_MODI_OUT_DIPV6_95_64};
1685                                 if (width < 32) {
1686                                         mask[idx] =
1687                                                 rte_cpu_to_be_32(0xffffffff >>
1688                                                                  (32 - width));
1689                                         width = 0;
1690                                 } else {
1691                                         mask[idx] = RTE_BE32(0xffffffff);
1692                                         width -= 32;
1693                                 }
1694                                 if (!width)
1695                                         break;
1696                                 ++idx;
1697                         }
1698                         info[idx] = (struct field_modify_info){4, 0,
1699                                                 MLX5_MODI_OUT_DIPV6_127_96};
1700                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1701                                                      (32 - width));
1702                 } else {
1703                         if (data->offset < 32)
1704                                 info[idx++] = (struct field_modify_info){4, 12,
1705                                                 MLX5_MODI_OUT_DIPV6_31_0};
1706                         if (data->offset < 64)
1707                                 info[idx++] = (struct field_modify_info){4, 8,
1708                                                 MLX5_MODI_OUT_DIPV6_63_32};
1709                         if (data->offset < 96)
1710                                 info[idx++] = (struct field_modify_info){4, 4,
1711                                                 MLX5_MODI_OUT_DIPV6_95_64};
1712                         if (data->offset < 128)
1713                                 info[idx++] = (struct field_modify_info){4, 0,
1714                                                 MLX5_MODI_OUT_DIPV6_127_96};
1715                 }
1716                 break;
1717         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1718                 info[idx] = (struct field_modify_info){2, 0,
1719                                         MLX5_MODI_OUT_TCP_SPORT};
1720                 if (mask)
1721                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1722                 break;
1723         case RTE_FLOW_FIELD_TCP_PORT_DST:
1724                 info[idx] = (struct field_modify_info){2, 0,
1725                                         MLX5_MODI_OUT_TCP_DPORT};
1726                 if (mask)
1727                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1728                 break;
1729         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1730                 info[idx] = (struct field_modify_info){4, 0,
1731                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1732                 if (mask)
1733                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1734                                                      (32 - width));
1735                 break;
1736         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1737                 info[idx] = (struct field_modify_info){4, 0,
1738                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1739                 if (mask)
1740                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1741                                                      (32 - width));
1742                 break;
1743         case RTE_FLOW_FIELD_TCP_FLAGS:
1744                 info[idx] = (struct field_modify_info){2, 0,
1745                                         MLX5_MODI_OUT_TCP_FLAGS};
1746                 if (mask)
1747                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1748                 break;
1749         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1750                 info[idx] = (struct field_modify_info){2, 0,
1751                                         MLX5_MODI_OUT_UDP_SPORT};
1752                 if (mask)
1753                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1754                 break;
1755         case RTE_FLOW_FIELD_UDP_PORT_DST:
1756                 info[idx] = (struct field_modify_info){2, 0,
1757                                         MLX5_MODI_OUT_UDP_DPORT};
1758                 if (mask)
1759                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1760                 break;
1761         case RTE_FLOW_FIELD_VXLAN_VNI:
1762                 /* not supported yet */
1763                 break;
1764         case RTE_FLOW_FIELD_GENEVE_VNI:
1765                 /* not supported yet */
1766                 break;
1767         case RTE_FLOW_FIELD_GTP_TEID:
1768                 info[idx] = (struct field_modify_info){4, 0,
1769                                         MLX5_MODI_GTP_TEID};
1770                 if (mask)
1771                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1772                                                      (32 - width));
1773                 break;
1774         case RTE_FLOW_FIELD_TAG:
1775                 {
1776                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1777                                                    data->level, error);
1778                         if (reg < 0)
1779                                 return;
1780                         MLX5_ASSERT(reg != REG_NON);
1781                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1782                         info[idx] = (struct field_modify_info){4, 0,
1783                                                 reg_to_field[reg]};
1784                         if (mask)
1785                                 mask[idx] =
1786                                         rte_cpu_to_be_32(0xffffffff >>
1787                                                          (32 - width));
1788                 }
1789                 break;
1790         case RTE_FLOW_FIELD_MARK:
1791                 {
1792                         uint32_t mark_mask = priv->sh->dv_mark_mask;
1793                         uint32_t mark_count = __builtin_popcount(mark_mask);
1794                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1795                                                        0, error);
1796                         if (reg < 0)
1797                                 return;
1798                         MLX5_ASSERT(reg != REG_NON);
1799                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1800                         info[idx] = (struct field_modify_info){4, 0,
1801                                                 reg_to_field[reg]};
1802                         if (mask)
1803                                 mask[idx] = rte_cpu_to_be_32((mark_mask >>
1804                                          (mark_count - width)) & mark_mask);
1805                 }
1806                 break;
1807         case RTE_FLOW_FIELD_META:
1808                 {
1809                         uint32_t meta_mask = priv->sh->dv_meta_mask;
1810                         uint32_t meta_count = __builtin_popcount(meta_mask);
1811                         uint32_t msk_c0 =
1812                                 rte_cpu_to_be_32(priv->sh->dv_regc0_mask);
1813                         uint32_t shl_c0 = rte_bsf32(msk_c0);
1814                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1815                         if (reg < 0)
1816                                 return;
1817                         MLX5_ASSERT(reg != REG_NON);
1818                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1819                         if (reg == REG_C_0)
1820                                 *shift = shl_c0;
1821                         info[idx] = (struct field_modify_info){4, 0,
1822                                                 reg_to_field[reg]};
1823                         if (mask)
1824                                 mask[idx] = rte_cpu_to_be_32((meta_mask >>
1825                                         (meta_count - width)) & meta_mask);
1826                 }
1827                 break;
1828         case RTE_FLOW_FIELD_POINTER:
1829         case RTE_FLOW_FIELD_VALUE:
1830         default:
1831                 MLX5_ASSERT(false);
1832                 break;
1833         }
1834 }
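
/*
 * Illustration, not part of the driver: how a 48-bit MAC is split
 * above. The hardware exposes DMAC as two modify fields, the low
 * 16 bits (MLX5_MODI_OUT_DMAC_15_0) and the high 32 bits
 * (MLX5_MODI_OUT_DMAC_47_16). For offset 0 and width 48 the code
 * emits:
 *
 *   info[0] = {2, 4, MLX5_MODI_OUT_DMAC_15_0};  mask[0] = BE16(0xffff)
 *   info[1] = {4, 0, MLX5_MODI_OUT_DMAC_47_16}; mask[1] = BE32(0xffffffff)
 *
 * For width 20 only 4 bits spill into the high field, so mask[1]
 * becomes rte_cpu_to_be_32(0xffffffff >> 28), i.e. 0xf in CPU order.
 */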
1835
1836 /**
1837  * Convert modify_field action to DV specification.
1838  *
1839  * @param[in] dev
1840  *   Pointer to the rte_eth_dev structure.
1841  * @param[in,out] resource
1842  *   Pointer to the modify-header resource.
1843  * @param[in] action
1844  *   Pointer to action specification.
1845  * @param[in] attr
1846  *   Attributes of flow that includes this item.
1847  * @param[out] error
1848  *   Pointer to the error structure.
1849  *
1850  * @return
1851  *   0 on success, a negative errno value otherwise and rte_errno is set.
1852  */
1853 static int
1854 flow_dv_convert_action_modify_field
1855                         (struct rte_eth_dev *dev,
1856                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1857                          const struct rte_flow_action *action,
1858                          const struct rte_flow_attr *attr,
1859                          struct rte_flow_error *error)
1860 {
1861         const struct rte_flow_action_modify_field *conf =
1862                 (const struct rte_flow_action_modify_field *)(action->conf);
1863         struct rte_flow_item item = {
1864                 .spec = NULL,
1865                 .mask = NULL
1866         };
1867         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1868                                                                 {0, 0, 0} };
1869         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1870                                                                 {0, 0, 0} };
1871         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1872         uint32_t type;
1873         uint32_t shift = 0;
1874
1875         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1876             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1877                 type = MLX5_MODIFICATION_TYPE_SET;
1878                 /* For SET fill the destination field (field) first. */
1879                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1880                                                   conf->width, &shift, dev,
1881                                                   attr, error);
1882                 item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1883                                         (void *)(uintptr_t)conf->src.pvalue :
1884                                         (void *)(uintptr_t)&conf->src.value;
1885         } else {
1886                 type = MLX5_MODIFICATION_TYPE_COPY;
1887                 /* For COPY fill the destination field (dcopy) without mask. */
1888                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1889                                                   conf->width, &shift, dev,
1890                                                   attr, error);
1891                 /* Then construct the source field (field) with mask. */
1892                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1893                                                   conf->width, &shift,
1894                                                   dev, attr, error);
1895         }
1896         item.mask = &mask;
1897         return flow_dv_convert_modify_action(&item,
1898                         field, dcopy, resource, type, error);
1899 }
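
/*
 * Illustration, not part of the driver: a sketch of the user-level
 * action this routine converts, assuming the rte_flow_action_modify_field
 * layout of this DPDK revision. Copying the low 24 bits of the GTP TEID
 * into TAG register 0 might look like:
 *
 *   struct rte_flow_action_modify_field conf = {
 *       .operation = RTE_FLOW_MODIFY_SET,
 *       .dst = { .field = RTE_FLOW_FIELD_TAG, .level = 0 },
 *       .src = { .field = RTE_FLOW_FIELD_GTP_TEID },
 *       .width = 24,
 *   };
 *
 * Because the source is a packet field rather than a VALUE or POINTER,
 * the code above emits MLX5_MODIFICATION_TYPE_COPY for it.
 */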
1900
1901 /**
1902  * Validate MARK item.
1903  *
1904  * @param[in] dev
1905  *   Pointer to the rte_eth_dev structure.
1906  * @param[in] item
1907  *   Item specification.
1908  * @param[in] attr
1909  *   Attributes of flow that includes this item.
1910  * @param[out] error
1911  *   Pointer to error structure.
1912  *
1913  * @return
1914  *   0 on success, a negative errno value otherwise and rte_errno is set.
1915  */
1916 static int
1917 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1918                            const struct rte_flow_item *item,
1919                            const struct rte_flow_attr *attr __rte_unused,
1920                            struct rte_flow_error *error)
1921 {
1922         struct mlx5_priv *priv = dev->data->dev_private;
1923         struct mlx5_dev_config *config = &priv->config;
1924         const struct rte_flow_item_mark *spec = item->spec;
1925         const struct rte_flow_item_mark *mask = item->mask;
1926         const struct rte_flow_item_mark nic_mask = {
1927                 .id = priv->sh->dv_mark_mask,
1928         };
1929         int ret;
1930
1931         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1932                 return rte_flow_error_set(error, ENOTSUP,
1933                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1934                                           "extended metadata feature"
1935                                           " isn't enabled");
1936         if (!mlx5_flow_ext_mreg_supported(dev))
1937                 return rte_flow_error_set(error, ENOTSUP,
1938                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1939                                           "extended metadata register"
1940                                           " isn't supported");
1941         if (!nic_mask.id)
1942                 return rte_flow_error_set(error, ENOTSUP,
1943                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1944                                           "extended metadata register"
1945                                           " isn't available");
1946         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1947         if (ret < 0)
1948                 return ret;
1949         if (!spec)
1950                 return rte_flow_error_set(error, EINVAL,
1951                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1952                                           item->spec,
1953                                           "data cannot be empty");
1954         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1955                 return rte_flow_error_set(error, EINVAL,
1956                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1957                                           &spec->id,
1958                                           "mark id exceeds the limit");
1959         if (!mask)
1960                 mask = &nic_mask;
1961         if (!mask->id)
1962                 return rte_flow_error_set(error, EINVAL,
1963                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1964                                         "mask cannot be zero");
1965
1966         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1967                                         (const uint8_t *)&nic_mask,
1968                                         sizeof(struct rte_flow_item_mark),
1969                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1970         if (ret < 0)
1971                 return ret;
1972         return 0;
1973 }
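
/*
 * Illustration, not part of the driver: matching a previously set MARK
 * requires dv_xmeta_en != MLX5_XMETA_MODE_LEGACY, per the checks above.
 * A minimal pattern entry could be:
 *
 *   struct rte_flow_item_mark spec = { .id = 42 };
 *   struct rte_flow_item item = {
 *       .type = RTE_FLOW_ITEM_TYPE_MARK,
 *       .spec = &spec,
 *   };
 *
 * A NULL mask defaults to dv_mark_mask, and the arbitrary id 42 must
 * stay below (MLX5_FLOW_MARK_MAX & dv_mark_mask).
 */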
1974
1975 /**
1976  * Validate META item.
1977  *
1978  * @param[in] dev
1979  *   Pointer to the rte_eth_dev structure.
1980  * @param[in] item
1981  *   Item specification.
1982  * @param[in] attr
1983  *   Attributes of flow that includes this item.
1984  * @param[out] error
1985  *   Pointer to error structure.
1986  *
1987  * @return
1988  *   0 on success, a negative errno value otherwise and rte_errno is set.
1989  */
1990 static int
1991 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1992                            const struct rte_flow_item *item,
1993                            const struct rte_flow_attr *attr,
1994                            struct rte_flow_error *error)
1995 {
1996         struct mlx5_priv *priv = dev->data->dev_private;
1997         struct mlx5_dev_config *config = &priv->config;
1998         const struct rte_flow_item_meta *spec = item->spec;
1999         const struct rte_flow_item_meta *mask = item->mask;
2000         struct rte_flow_item_meta nic_mask = {
2001                 .data = UINT32_MAX
2002         };
2003         int reg;
2004         int ret;
2005
2006         if (!spec)
2007                 return rte_flow_error_set(error, EINVAL,
2008                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2009                                           item->spec,
2010                                           "data cannot be empty");
2011         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2012                 if (!mlx5_flow_ext_mreg_supported(dev))
2013                         return rte_flow_error_set(error, ENOTSUP,
2014                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2015                                           "extended metadata register"
2016                                           " isn't supported");
2017                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2018                 if (reg < 0)
2019                         return reg;
2020                 if (reg == REG_NON)
2021                         return rte_flow_error_set(error, ENOTSUP,
2022                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2023                                         "unavailable extended metadata register");
2024                 if (reg == REG_B)
2025                         return rte_flow_error_set(error, ENOTSUP,
2026                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2027                                           "match on reg_b "
2028                                           "isn't supported");
2029                 if (reg != REG_A)
2030                         nic_mask.data = priv->sh->dv_meta_mask;
2031         } else {
2032                 if (attr->transfer)
2033                         return rte_flow_error_set(error, ENOTSUP,
2034                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2035                                         "extended metadata feature "
2036                                         "should be enabled when "
2037                                         "meta item is requested "
2038                                         "with e-switch mode");
2039                 if (attr->ingress)
2040                         return rte_flow_error_set(error, ENOTSUP,
2041                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2042                                         "match on metadata for ingress "
2043                                         "is not supported in legacy "
2044                                         "metadata mode");
2045         }
2046         if (!mask)
2047                 mask = &rte_flow_item_meta_mask;
2048         if (!mask->data)
2049                 return rte_flow_error_set(error, EINVAL,
2050                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2051                                         "mask cannot be zero");
2052
2053         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2054                                         (const uint8_t *)&nic_mask,
2055                                         sizeof(struct rte_flow_item_meta),
2056                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2057         return ret;
2058 }
2059
2060 /**
2061  * Validate TAG item.
2062  *
2063  * @param[in] dev
2064  *   Pointer to the rte_eth_dev structure.
2065  * @param[in] item
2066  *   Item specification.
2067  * @param[in] attr
2068  *   Attributes of flow that includes this item.
2069  * @param[out] error
2070  *   Pointer to error structure.
2071  *
2072  * @return
2073  *   0 on success, a negative errno value otherwise and rte_errno is set.
2074  */
2075 static int
2076 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2077                           const struct rte_flow_item *item,
2078                           const struct rte_flow_attr *attr __rte_unused,
2079                           struct rte_flow_error *error)
2080 {
2081         const struct rte_flow_item_tag *spec = item->spec;
2082         const struct rte_flow_item_tag *mask = item->mask;
2083         const struct rte_flow_item_tag nic_mask = {
2084                 .data = RTE_BE32(UINT32_MAX),
2085                 .index = 0xff,
2086         };
2087         int ret;
2088
2089         if (!mlx5_flow_ext_mreg_supported(dev))
2090                 return rte_flow_error_set(error, ENOTSUP,
2091                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2092                                           "extended metadata register"
2093                                           " isn't supported");
2094         if (!spec)
2095                 return rte_flow_error_set(error, EINVAL,
2096                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2097                                           item->spec,
2098                                           "data cannot be empty");
2099         if (!mask)
2100                 mask = &rte_flow_item_tag_mask;
2101         if (!mask->data)
2102                 return rte_flow_error_set(error, EINVAL,
2103                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2104                                         "mask cannot be zero");
2105
2106         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2107                                         (const uint8_t *)&nic_mask,
2108                                         sizeof(struct rte_flow_item_tag),
2109                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2110         if (ret < 0)
2111                 return ret;
2112         if (mask->index != 0xff)
2113                 return rte_flow_error_set(error, EINVAL,
2114                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2115                                           "partial mask for tag index"
2116                                           " is not supported");
2117         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2118         if (ret < 0)
2119                 return ret;
2120         MLX5_ASSERT(ret != REG_NON);
2121         return 0;
2122 }
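
/*
 * Illustration, not part of the driver: a TAG match must use the full
 * 0xff index mask, per the check above. With illustrative values:
 *
 *   struct rte_flow_item_tag spec = { .data = 0x1234, .index = 0 };
 *   struct rte_flow_item_tag mask = { .data = 0xffff, .index = 0xff };
 *
 * used with item type RTE_FLOW_ITEM_TYPE_TAG.
 */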
2123
2124 /**
2125  * Validate vport item.
2126  *
2127  * @param[in] dev
2128  *   Pointer to the rte_eth_dev structure.
2129  * @param[in] item
2130  *   Item specification.
2131  * @param[in] attr
2132  *   Attributes of flow that includes this item.
2133  * @param[in] item_flags
2134  *   Bit-fields that hold the items detected until now.
2135  * @param[out] error
2136  *   Pointer to error structure.
2137  *
2138  * @return
2139  *   0 on success, a negative errno value otherwise and rte_errno is set.
2140  */
2141 static int
2142 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2143                               const struct rte_flow_item *item,
2144                               const struct rte_flow_attr *attr,
2145                               uint64_t item_flags,
2146                               struct rte_flow_error *error)
2147 {
2148         const struct rte_flow_item_port_id *spec = item->spec;
2149         const struct rte_flow_item_port_id *mask = item->mask;
2150         const struct rte_flow_item_port_id switch_mask = {
2151                         .id = 0xffffffff,
2152         };
2153         struct mlx5_priv *esw_priv;
2154         struct mlx5_priv *dev_priv;
2155         int ret;
2156
2157         if (!attr->transfer)
2158                 return rte_flow_error_set(error, EINVAL,
2159                                           RTE_FLOW_ERROR_TYPE_ITEM,
2160                                           NULL,
2161                                           "match on port id is valid only"
2162                                           " when transfer flag is enabled");
2163         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2164                 return rte_flow_error_set(error, ENOTSUP,
2165                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2166                                           "multiple source ports are not"
2167                                           " supported");
2168         if (!mask)
2169                 mask = &switch_mask;
2170         if (mask->id != 0xffffffff)
2171                 return rte_flow_error_set(error, ENOTSUP,
2172                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2173                                            mask,
2174                                            "no support for partial mask on"
2175                                            " \"id\" field");
2176         ret = mlx5_flow_item_acceptable
2177                                 (item, (const uint8_t *)mask,
2178                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2179                                  sizeof(struct rte_flow_item_port_id),
2180                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2181         if (ret)
2182                 return ret;
2183         if (!spec)
2184                 return 0;
2185         if (spec->id == MLX5_PORT_ESW_MGR)
2186                 return 0;
2187         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2188         if (!esw_priv)
2189                 return rte_flow_error_set(error, rte_errno,
2190                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2191                                           "failed to obtain E-Switch info for"
2192                                           " port");
2193         dev_priv = mlx5_dev_to_eswitch_info(dev);
2194         if (!dev_priv)
2195                 return rte_flow_error_set(error, rte_errno,
2196                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2197                                           NULL,
2198                                           "failed to obtain E-Switch info");
2199         if (esw_priv->domain_id != dev_priv->domain_id)
2200                 return rte_flow_error_set(error, EINVAL,
2201                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2202                                           "cannot match on a port from a"
2203                                           " different E-Switch");
2204         return 0;
2205 }
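
/*
 * Illustration, not part of the driver: PORT_ID can only be matched on
 * transfer flows and only with the full id mask, per the checks above:
 *
 *   struct rte_flow_attr attr = { .transfer = 1, .group = 1 };
 *   struct rte_flow_item_port_id spec = { .id = 1 };
 *
 * The default rte_flow_item_port_id_mask already sets id to 0xffffffff,
 * and the matched port must share the E-Switch domain of the device.
 */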
2206
2207 /**
2208  * Validate VLAN item.
2209  *
2210  * @param[in] item
2211  *   Item specification.
2212  * @param[in] item_flags
2213  *   Bit-fields that hold the items detected until now.
2214  * @param[in] dev
2215  *   Ethernet device flow is being created on.
2216  * @param[out] error
2217  *   Pointer to error structure.
2218  *
2219  * @return
2220  *   0 on success, a negative errno value otherwise and rte_errno is set.
2221  */
2222 static int
2223 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2224                            uint64_t item_flags,
2225                            struct rte_eth_dev *dev,
2226                            struct rte_flow_error *error)
2227 {
2228         const struct rte_flow_item_vlan *mask = item->mask;
2229         const struct rte_flow_item_vlan nic_mask = {
2230                 .tci = RTE_BE16(UINT16_MAX),
2231                 .inner_type = RTE_BE16(UINT16_MAX),
2232                 .has_more_vlan = 1,
2233         };
2234         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2235         int ret;
2236         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2237                                         MLX5_FLOW_LAYER_INNER_L4) :
2238                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2239                                         MLX5_FLOW_LAYER_OUTER_L4);
2240         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2241                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2242
2243         if (item_flags & vlanm)
2244                 return rte_flow_error_set(error, EINVAL,
2245                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2246                                           "multiple VLAN layers not supported");
2247         else if ((item_flags & l34m) != 0)
2248                 return rte_flow_error_set(error, EINVAL,
2249                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2250                                           "VLAN cannot follow L3/L4 layer");
2251         if (!mask)
2252                 mask = &rte_flow_item_vlan_mask;
2253         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2254                                         (const uint8_t *)&nic_mask,
2255                                         sizeof(struct rte_flow_item_vlan),
2256                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2257         if (ret)
2258                 return ret;
2259         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2260                 struct mlx5_priv *priv = dev->data->dev_private;
2261
2262                 if (priv->vmwa_context) {
2263                         /*
2264                          * A non-NULL context means we have a virtual machine
2265                          * with SR-IOV enabled, and we have to create a VLAN
2266                          * interface to make the hypervisor set up the E-Switch
2267                          * vport context correctly. We avoid creating multiple
2268                          * VLAN interfaces, so we cannot support a VLAN tag mask.
2269                          */
2270                         return rte_flow_error_set(error, EINVAL,
2271                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2272                                                   item,
2273                                                   "VLAN tag mask is not"
2274                                                   " supported in virtual"
2275                                                   " environment");
2276                 }
2277         }
2278         return 0;
2279 }
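
/*
 * Illustration, not part of the driver: matching outer VLAN ID 100 with
 * the full 12-bit VID mask:
 *
 *   struct rte_flow_item_vlan spec = { .tci = RTE_BE16(100) };
 *   struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0x0fff) };
 *
 * In a VM with SR-IOV (non-NULL vmwa_context) only this full VID mask
 * is accepted for non-tunnel VLAN matching, per the check above.
 */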
2280
2281 /*
2282  * GTP flags are contained in 1 byte of the format:
2283  * -------------------------------------------
2284  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2285  * |-----------------------------------------|
2286  * | value | Version | PT | Res | E | S | PN |
2287  * -------------------------------------------
2288  *
2289  * Matching is supported only for GTP flags E, S, PN.
2290  */
2291 #define MLX5_GTP_FLAGS_MASK     0x07
2292
2293 /**
2294  * Validate GTP item.
2295  *
2296  * @param[in] dev
2297  *   Pointer to the rte_eth_dev structure.
2298  * @param[in] item
2299  *   Item specification.
2300  * @param[in] item_flags
2301  *   Bit-fields that hold the items detected until now.
2302  * @param[out] error
2303  *   Pointer to error structure.
2304  *
2305  * @return
2306  *   0 on success, a negative errno value otherwise and rte_errno is set.
2307  */
2308 static int
2309 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2310                           const struct rte_flow_item *item,
2311                           uint64_t item_flags,
2312                           struct rte_flow_error *error)
2313 {
2314         struct mlx5_priv *priv = dev->data->dev_private;
2315         const struct rte_flow_item_gtp *spec = item->spec;
2316         const struct rte_flow_item_gtp *mask = item->mask;
2317         const struct rte_flow_item_gtp nic_mask = {
2318                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2319                 .msg_type = 0xff,
2320                 .teid = RTE_BE32(0xffffffff),
2321         };
2322
2323         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2324                 return rte_flow_error_set(error, ENOTSUP,
2325                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2326                                           "GTP support is not enabled");
2327         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2328                 return rte_flow_error_set(error, ENOTSUP,
2329                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2330                                           "multiple tunnel layers not"
2331                                           " supported");
2332         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2333                 return rte_flow_error_set(error, EINVAL,
2334                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2335                                           "no outer UDP layer found");
2336         if (!mask)
2337                 mask = &rte_flow_item_gtp_mask;
2338         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2339                 return rte_flow_error_set(error, ENOTSUP,
2340                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2341                                           "match is supported for GTP"
2342                                           " flags E, S, PN only");
2343         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2344                                          (const uint8_t *)&nic_mask,
2345                                          sizeof(struct rte_flow_item_gtp),
2346                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2347 }
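
/*
 * Illustration, not part of the driver: with MLX5_GTP_FLAGS_MASK being
 * 0x07, only the E (0x04), S (0x02) and PN (0x01) bits of
 * v_pt_rsv_flags can be matched. Matching packets carrying a GTP
 * extension header, for example:
 *
 *   struct rte_flow_item_gtp spec = { .v_pt_rsv_flags = 0x04 };
 *   struct rte_flow_item_gtp mask = { .v_pt_rsv_flags = 0x04 };
 */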
2348
2349 /**
2350  * Validate GTP PSC item.
2351  *
2352  * @param[in] item
2353  *   Item specification.
2354  * @param[in] last_item
2355  *   Previously validated item in the pattern items.
2356  * @param[in] gtp_item
2357  *   Previous GTP item specification.
2358  * @param[in] attr
2359  *   Pointer to flow attributes.
2360  * @param[out] error
2361  *   Pointer to error structure.
2362  *
2363  * @return
2364  *   0 on success, a negative errno value otherwise and rte_errno is set.
2365  */
2366 static int
2367 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2368                               uint64_t last_item,
2369                               const struct rte_flow_item *gtp_item,
2370                               const struct rte_flow_attr *attr,
2371                               struct rte_flow_error *error)
2372 {
2373         const struct rte_flow_item_gtp *gtp_spec;
2374         const struct rte_flow_item_gtp *gtp_mask;
2375         const struct rte_flow_item_gtp_psc *mask;
2376         const struct rte_flow_item_gtp_psc nic_mask = {
2377                 .hdr.type = 0xF,
2378                 .hdr.qfi = 0x3F,
2379         };
2380
2381         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2382                 return rte_flow_error_set
2383                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2384                          "GTP PSC item must be preceded with GTP item");
2385         gtp_spec = gtp_item->spec;
2386         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2387         /* Reject when the GTP spec requests the E flag to match zero. */
2388         if (gtp_spec &&
2389                 (gtp_mask->v_pt_rsv_flags &
2390                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2391                 return rte_flow_error_set
2392                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2393                          "GTP E flag must be 1 to match GTP PSC");
2394         /* Check the flow is not created in group zero. */
2395         if (!attr->transfer && !attr->group)
2396                 return rte_flow_error_set
2397                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2398                          "GTP PSC is not supported for group 0");
2399         /* Nothing more to validate when the GTP PSC spec is absent. */
2400         if (!item->spec)
2401                 return 0;
2402         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2403         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2404                                          (const uint8_t *)&nic_mask,
2405                                          sizeof(struct rte_flow_item_gtp_psc),
2406                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2407 }
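
/*
 * Illustration, not part of the driver: per the checks above, a valid
 * GTP PSC pattern follows a GTP item whose E flag is not forced to
 * zero, in a non-zero group. Matching QFI 5, for example:
 *
 *   struct rte_flow_item_gtp_psc spec = { .hdr.qfi = 5 };
 *   struct rte_flow_item_gtp_psc mask = { .hdr.qfi = 0x3f };
 */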
2408
2409 /**
2410  * Validate IPV4 item.
2411  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2412  * add specific validation of the fragment_offset field.
2413  *
2414  * @param[in] item
2415  *   Item specification.
2416  * @param[in] item_flags
2417  *   Bit-fields that hold the items detected until now.
2418  * @param[out] error
2419  *   Pointer to error structure.
2420  *
2421  * @return
2422  *   0 on success, a negative errno value otherwise and rte_errno is set.
2423  */
2424 static int
2425 flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
2426                            const struct rte_flow_item *item,
2427                            uint64_t item_flags, uint64_t last_item,
2428                            uint16_t ether_type, struct rte_flow_error *error)
2429 {
2430         int ret;
2431         struct mlx5_priv *priv = dev->data->dev_private;
2432         const struct rte_flow_item_ipv4 *spec = item->spec;
2433         const struct rte_flow_item_ipv4 *last = item->last;
2434         const struct rte_flow_item_ipv4 *mask = item->mask;
2435         rte_be16_t fragment_offset_spec = 0;
2436         rte_be16_t fragment_offset_last = 0;
2437         struct rte_flow_item_ipv4 nic_ipv4_mask = {
2438                 .hdr = {
2439                         .src_addr = RTE_BE32(0xffffffff),
2440                         .dst_addr = RTE_BE32(0xffffffff),
2441                         .type_of_service = 0xff,
2442                         .fragment_offset = RTE_BE16(0xffff),
2443                         .next_proto_id = 0xff,
2444                         .time_to_live = 0xff,
2445                 },
2446         };
2447
2448         if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
2449                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2450                 bool ihl_cap = !tunnel ? priv->config.hca_attr.outer_ipv4_ihl :
2451                                priv->config.hca_attr.inner_ipv4_ihl;
2452                 if (!ihl_cap)
2453                         return rte_flow_error_set(error, ENOTSUP,
2454                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2455                                                   item,
2456                                                   "IPV4 ihl offload not supported");
2457                 nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
2458         }
2459         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2460                                            ether_type, &nic_ipv4_mask,
2461                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2462         if (ret < 0)
2463                 return ret;
2464         if (spec && mask)
2465                 fragment_offset_spec = spec->hdr.fragment_offset &
2466                                        mask->hdr.fragment_offset;
2467         if (!fragment_offset_spec)
2468                 return 0;
2469         /*
2470          * spec and mask are valid, enforce using full mask to make sure the
2471          * complete value is used correctly.
2472          */
2473         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2474                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2475                 return rte_flow_error_set(error, EINVAL,
2476                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2477                                           item, "must use full mask for"
2478                                           " fragment_offset");
2479         /*
2480          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2481          * indicating this is the first fragment of a fragmented packet.
2482          * This is not yet supported in MLX5, so return an appropriate error.
2483          */
2484         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2485                 return rte_flow_error_set(error, ENOTSUP,
2486                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2487                                           "match on first fragment not "
2488                                           "supported");
2489         if (fragment_offset_spec && !last)
2490                 return rte_flow_error_set(error, ENOTSUP,
2491                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2492                                           "specified value not supported");
2493         /* spec and last are valid, validate the specified range. */
2494         fragment_offset_last = last->hdr.fragment_offset &
2495                                mask->hdr.fragment_offset;
2496         /*
2497          * Match on fragment_offset spec 0x2001 and last 0x3fff
2498          * means MF is 1 and frag-offset is > 0.
2499          * This matches the second fragment onward, excluding the last one.
2500          * This is not yet supported in MLX5; return an appropriate
2501          * error message.
2502          */
2503         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2504             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2505                 return rte_flow_error_set(error, ENOTSUP,
2506                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2507                                           last, "match on following "
2508                                           "fragments not supported");
2509         /*
2510          * Match on fragment_offset spec 0x0001 and last 0x1fff
2511          * means MF is 0 and frag-offset is > 0.
2512          * This matches the last fragment of a fragmented packet.
2513          * This is not yet supported in MLX5; return an appropriate
2514          * error message.
2515          */
2516         if (fragment_offset_spec == RTE_BE16(1) &&
2517             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2518                 return rte_flow_error_set(error, ENOTSUP,
2519                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2520                                           last, "match on last "
2521                                           "fragment not supported");
2522         /*
2523          * Match on fragment_offset spec 0x0001 and last 0x3fff
2524          * means MF and/or frag-offset is not 0.
2525          * This is a fragmented packet.
2526          * Other range values are invalid and rejected.
2527          */
2528         if (!(fragment_offset_spec == RTE_BE16(1) &&
2529               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2530                 return rte_flow_error_set(error, ENOTSUP,
2531                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2532                                           "specified range not supported");
2533         return 0;
2534 }
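
/*
 * Editor's illustrative sketch, not compiled into the driver: the only
 * fragment_offset range the validator above accepts is spec 0x0001 with
 * last 0x3fff under a full 0x3fff mask, i.e. "any fragment of a
 * fragmented packet". A hypothetical application-side item built from
 * public rte_ip.h constants:
 *
 *     struct rte_flow_item_ipv4 ipv4_spec = {
 *             .hdr = { .fragment_offset = RTE_BE16(1) },
 *     };
 *     struct rte_flow_item_ipv4 ipv4_last = {
 *             .hdr = { .fragment_offset =
 *                      RTE_BE16(RTE_IPV4_HDR_MF_FLAG |
 *                               RTE_IPV4_HDR_OFFSET_MASK) },
 *     };
 *     struct rte_flow_item_ipv4 ipv4_mask = ipv4_last;
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ipv4_spec,
 *             .last = &ipv4_last,
 *             .mask = &ipv4_mask,
 *     };
 */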
2535
2536 /**
2537  * Validate the IPv6 fragment extension item.
2538  *
2539  * @param[in] item
2540  *   Item specification.
2541  * @param[in] item_flags
2542  *   Bit-fields that hold the items detected until now.
2543  * @param[out] error
2544  *   Pointer to error structure.
2545  *
2546  * @return
2547  *   0 on success, a negative errno value otherwise and rte_errno is set.
2548  */
2549 static int
2550 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2551                                     uint64_t item_flags,
2552                                     struct rte_flow_error *error)
2553 {
2554         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2555         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2556         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2557         rte_be16_t frag_data_spec = 0;
2558         rte_be16_t frag_data_last = 0;
2559         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2560         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2561                                       MLX5_FLOW_LAYER_OUTER_L4;
2562         int ret = 0;
2563         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2564                 .hdr = {
2565                         .next_header = 0xff,
2566                         .frag_data = RTE_BE16(0xffff),
2567                 },
2568         };
2569
2570         if (item_flags & l4m)
2571                 return rte_flow_error_set(error, EINVAL,
2572                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2573                                           "ipv6 fragment extension item cannot "
2574                                           "follow L4 item.");
2575         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2576             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2577                 return rte_flow_error_set(error, EINVAL,
2578                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2579                                           "ipv6 fragment extension item must "
2580                                           "follow ipv6 item");
2581         if (spec && mask)
2582                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2583         if (!frag_data_spec)
2584                 return 0;
2585         /*
2586          * spec and mask are valid, enforce using full mask to make sure the
2587          * complete value is used correctly.
2588          */
2589         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2590                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2591                 return rte_flow_error_set(error, EINVAL,
2592                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2593                                           item, "must use full mask for"
2594                                           " frag_data");
2595         /*
2596          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2597          * This is the first fragment of a fragmented packet.
2598          */
2599         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2600                 return rte_flow_error_set(error, ENOTSUP,
2601                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2602                                           "match on first fragment not "
2603                                           "supported");
2604         if (frag_data_spec && !last)
2605                 return rte_flow_error_set(error, EINVAL,
2606                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2607                                           "specified value not supported");
2608         ret = mlx5_flow_item_acceptable
2609                                 (item, (const uint8_t *)mask,
2610                                  (const uint8_t *)&nic_mask,
2611                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2612                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2613         if (ret)
2614                 return ret;
2615         /* spec and last are valid, validate the specified range. */
2616         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2617         /*
2618          * Match on frag_data spec 0x0009 and last 0xfff9
2619          * means M is 1 and frag-offset is > 0.
2620          * This matches the second fragment onward, excluding the last one.
2621          * This is not yet supported in MLX5; return an appropriate
2622          * error message.
2623          */
2624         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2625                                        RTE_IPV6_EHDR_MF_MASK) &&
2626             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2627                 return rte_flow_error_set(error, ENOTSUP,
2628                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2629                                           last, "match on following "
2630                                           "fragments not supported");
2631         /*
2632          * Match on frag_data spec 0x0008 and last 0xfff8
2633          * means M is 0 and frag-offset is > 0.
2634          * This matches the last fragment of a fragmented packet.
2635          * This is not yet supported in MLX5; return an appropriate
2636          * error message.
2637          */
2638         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2639             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2640                 return rte_flow_error_set(error, ENOTSUP,
2641                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2642                                           last, "match on last "
2643                                           "fragment not supported");
2644         /* Other range values are invalid and rejected. */
2645         return rte_flow_error_set(error, EINVAL,
2646                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2647                                   "specified range not supported");
2648 }
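
/*
 * Editor's illustrative sketch, not compiled into the driver: every
 * non-zero frag_data spec currently ends in an error above, so the
 * accepted use is matching the mere presence of the fragment extension
 * header (optionally with next_header), placed after an IPv6 item in
 * the pattern. A hypothetical application-side item:
 *
 *     struct rte_flow_item_ipv6_frag_ext ext_spec = {
 *             .hdr = { .next_header = IPPROTO_UDP },
 *     };
 *     struct rte_flow_item_ipv6_frag_ext ext_mask = {
 *             .hdr = { .next_header = 0xff },
 *     };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
 *             .spec = &ext_spec,
 *             .mask = &ext_mask,
 *     };
 */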
2649
2650 /**
2651  * Validate ASO CT item.
2652  *
2653  * @param[in] dev
2654  *   Pointer to the rte_eth_dev structure.
2655  * @param[in] item
2656  *   Item specification.
2657  * @param[in] item_flags
2658  *   Pointer to bit-fields that hold the items detected until now.
2659  * @param[out] error
2660  *   Pointer to error structure.
2661  *
2662  * @return
2663  *   0 on success, a negative errno value otherwise and rte_errno is set.
2664  */
2665 static int
2666 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2667                              const struct rte_flow_item *item,
2668                              uint64_t *item_flags,
2669                              struct rte_flow_error *error)
2670 {
2671         const struct rte_flow_item_conntrack *spec = item->spec;
2672         const struct rte_flow_item_conntrack *mask = item->mask;
2673         RTE_SET_USED(dev);
2674         uint32_t flags;
2675
2676         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2677                 return rte_flow_error_set(error, EINVAL,
2678                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2679                                           "Only one CT is supported");
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "spec cannot be NULL");
2680         if (!mask)
2681                 mask = &rte_flow_item_conntrack_mask;
2682         flags = spec->flags & mask->flags;
2683         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2684             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2685              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2686              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2687                 return rte_flow_error_set(error, EINVAL,
2688                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2689                                           "Conflicting status bits");
2690         /* State change also needs to be considered. */
2691         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2692         return 0;
2693 }
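
/*
 * Editor's illustrative sketch, not compiled into the driver: a
 * conntrack item the check above accepts -- the VALID bit alone does
 * not conflict with INVALID, BAD or DISABLED; with a NULL mask the
 * default rte_flow_item_conntrack_mask is applied:
 *
 *     struct rte_flow_item_conntrack ct_spec = {
 *             .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
 *     };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
 *             .spec = &ct_spec,
 *     };
 */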
2694
2695 /**
2696  * Validate the pop VLAN action.
2697  *
2698  * @param[in] dev
2699  *   Pointer to the rte_eth_dev structure.
2700  * @param[in] action_flags
2701  *   Holds the actions detected until now.
2702  * @param[in] action
2703  *   Pointer to the pop vlan action.
2704  * @param[in] item_flags
2705  *   The items found in this flow rule.
2706  * @param[in] attr
2707  *   Pointer to flow attributes.
2708  * @param[out] error
2709  *   Pointer to error structure.
2710  *
2711  * @return
2712  *   0 on success, a negative errno value otherwise and rte_errno is set.
2713  */
2714 static int
2715 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2716                                  uint64_t action_flags,
2717                                  const struct rte_flow_action *action,
2718                                  uint64_t item_flags,
2719                                  const struct rte_flow_attr *attr,
2720                                  struct rte_flow_error *error)
2721 {
2722         const struct mlx5_priv *priv = dev->data->dev_private;
2723         struct mlx5_dev_ctx_shared *sh = priv->sh;
2724         bool direction_error = false;
2725
2726         if (!priv->sh->pop_vlan_action)
2727                 return rte_flow_error_set(error, ENOTSUP,
2728                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2729                                           NULL,
2730                                           "pop vlan action is not supported");
2731         /* Pop VLAN is not supported in egress, except in FDB mode on ConnectX-6 and newer. */
2732         if (attr->transfer) {
2733                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2734                 bool is_cx5 = sh->steering_format_version ==
2735                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2736
2737                 if (fdb_tx && is_cx5)
2738                         direction_error = true;
2739         } else if (attr->egress) {
2740                 direction_error = true;
2741         }
2742         if (direction_error)
2743                 return rte_flow_error_set(error, ENOTSUP,
2744                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2745                                           NULL,
2746                                           "pop vlan action not supported for egress");
2747         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2748                 return rte_flow_error_set(error, ENOTSUP,
2749                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2750                                           "no support for multiple VLAN "
2751                                           "actions");
2752         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2753         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2754             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2755                 return rte_flow_error_set(error, ENOTSUP,
2756                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2757                                           NULL,
2758                                           "cannot pop vlan after decap without "
2759                                           "match on inner vlan in the flow");
2760         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2761         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2762             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2763                 return rte_flow_error_set(error, ENOTSUP,
2764                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2765                                           NULL,
2766                                           "cannot pop vlan without a "
2767                                           "match on (outer) vlan in the flow");
2768         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2769                 return rte_flow_error_set(error, EINVAL,
2770                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2771                                           "wrong action order, port_id should "
2772                                           "be after pop VLAN action");
2773         if (!attr->transfer && priv->representor)
2774                 return rte_flow_error_set(error, ENOTSUP,
2775                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2776                                           "pop vlan action for VF representor "
2777                                           "not supported on NIC table");
2778         return 0;
2779 }
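
/*
 * Editor's illustrative sketch, not compiled into the driver: an
 * ingress action list satisfying the ordering rules above -- the
 * pattern must also match on (outer) VLAN, and port_id, if present,
 * must come after the pop. "queue" stands for a hypothetical
 * struct rte_flow_action_queue configuration:
 *
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */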
2780
2781 /**
2782  * Get VLAN default info from the VLAN match info.
2783  *
2784  * @param[in] items
2785  *   The list of item specifications.
2786  * @param[out] vlan
2787  *   Pointer to the VLAN info to fill.
2791  */
2792 static void
2793 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2794                                   struct rte_vlan_hdr *vlan)
2795 {
2796         const struct rte_flow_item_vlan nic_mask = {
2797                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2798                                 MLX5DV_FLOW_VLAN_VID_MASK),
2799                 .inner_type = RTE_BE16(0xffff),
2800         };
2801
2802         if (items == NULL)
2803                 return;
2804         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2805                 int type = items->type;
2806
2807                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2808                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2809                         break;
2810         }
2811         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2812                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2813                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2814
2815                 /* If VLAN item in pattern doesn't contain data, return here. */
2816                 if (!vlan_v)
2817                         return;
2818                 if (!vlan_m)
2819                         vlan_m = &nic_mask;
2820                 /* Only full match values are accepted */
2821                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2822                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2823                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2824                         vlan->vlan_tci |=
2825                                 rte_be_to_cpu_16(vlan_v->tci &
2826                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2827                 }
2828                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2829                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2830                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2831                         vlan->vlan_tci |=
2832                                 rte_be_to_cpu_16(vlan_v->tci &
2833                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2834                 }
2835                 if (vlan_m->inner_type == nic_mask.inner_type)
2836                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2837                                                            vlan_m->inner_type);
2838         }
2839 }
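
/*
 * Editor's illustrative sketch, not compiled into the driver: with a
 * full-mask VLAN item in the pattern (e.g. PCP 1, VID 100 below and a
 * NULL mask, so the default NIC mask applies), the helper above fills
 * vlan->vlan_tci with 0x2064:
 *
 *     struct rte_flow_item_vlan vlan_spec = {
 *             .tci = RTE_BE16((1 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100),
 *     };
 *     struct rte_vlan_hdr vlan = { 0 };
 *
 *     flow_dev_get_vlan_info_from_items(items, &vlan);
 *
 * Here "items" is the hypothetical pattern array containing vlan_spec.
 */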
2840
2841 /**
2842  * Validate the push VLAN action.
2843  *
2844  * @param[in] dev
2845  *   Pointer to the rte_eth_dev structure.
2846  * @param[in] action_flags
2847  *   Holds the actions detected until now.
2848  * @param[in] vlan_m
2849  *   Pointer to the VLAN item match mask, or NULL if not present.
2850  * @param[in] action
2851  *   Pointer to the action structure.
2852  * @param[in] attr
2853  *   Pointer to flow attributes
2854  * @param[out] error
2855  *   Pointer to error structure.
2856  *
2857  * @return
2858  *   0 on success, a negative errno value otherwise and rte_errno is set.
2859  */
2860 static int
2861 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2862                                   uint64_t action_flags,
2863                                   const struct rte_flow_item_vlan *vlan_m,
2864                                   const struct rte_flow_action *action,
2865                                   const struct rte_flow_attr *attr,
2866                                   struct rte_flow_error *error)
2867 {
2868         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2869         const struct mlx5_priv *priv = dev->data->dev_private;
2870         struct mlx5_dev_ctx_shared *sh = priv->sh;
2871         bool direction_error = false;
2872
2873         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2874             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2875                 return rte_flow_error_set(error, EINVAL,
2876                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2877                                           "invalid vlan ethertype");
2878         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2879                 return rte_flow_error_set(error, EINVAL,
2880                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2881                                           "wrong action order, port_id should "
2882                                           "be after push VLAN");
2883         /* Push VLAN is not supported in ingress, except in FDB mode on ConnectX-6 and newer. */
2884         if (attr->transfer) {
2885                 bool fdb_tx = priv->representor_id != UINT16_MAX;
2886                 bool is_cx5 = sh->steering_format_version ==
2887                     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
2888
2889                 if (!fdb_tx && is_cx5)
2890                         direction_error = true;
2891         } else if (attr->ingress) {
2892                 direction_error = true;
2893         }
2894         if (direction_error)
2895                 return rte_flow_error_set(error, ENOTSUP,
2896                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2897                                           NULL,
2898                                           "push vlan action not supported for ingress");
2899         if (!attr->transfer && priv->representor)
2900                 return rte_flow_error_set(error, ENOTSUP,
2901                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2902                                           "push vlan action for VF representor "
2903                                           "not supported on NIC table");
2904         if (vlan_m &&
2905             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2906             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2907                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2908             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2909             !(mlx5_flow_find_action
2910                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2911                 return rte_flow_error_set(error, EINVAL,
2912                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2913                                           "not full match mask on VLAN PCP and "
2914                                           "there is no of_set_vlan_pcp action, "
2915                                           "push VLAN action cannot figure out "
2916                                           "PCP value");
2917         if (vlan_m &&
2918             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2919             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2920                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2921             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2922             !(mlx5_flow_find_action
2923                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2924                 return rte_flow_error_set(error, EINVAL,
2925                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2926                                           "not full match mask on VLAN VID and "
2927                                           "there is no of_set_vlan_vid action, "
2928                                           "push VLAN action cannot figure out "
2929                                           "VID value");
2931         return 0;
2932 }
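
/*
 * Editor's illustrative sketch, not compiled into the driver: an
 * action list accepted by the push VLAN validator above and by the two
 * set VLAN PCP/VID validators that follow -- push first, then the PCP
 * and VID rewrites:
 *
 *     struct rte_flow_action_of_push_vlan push = {
 *             .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *     };
 *     struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *     struct rte_flow_action_of_set_vlan_vid vid = {
 *             .vlan_vid = RTE_BE16(100),
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */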
2933
2934 /**
2935  * Validate the set VLAN PCP.
2936  *
2937  * @param[in] action_flags
2938  *   Holds the actions detected until now.
2939  * @param[in] actions
2940  *   Pointer to the list of actions remaining in the flow rule.
2941  * @param[out] error
2942  *   Pointer to error structure.
2943  *
2944  * @return
2945  *   0 on success, a negative errno value otherwise and rte_errno is set.
2946  */
2947 static int
2948 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2949                                      const struct rte_flow_action actions[],
2950                                      struct rte_flow_error *error)
2951 {
2952         const struct rte_flow_action *action = actions;
2953         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2954
2955         if (conf->vlan_pcp > 7)
2956                 return rte_flow_error_set(error, EINVAL,
2957                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2958                                           "VLAN PCP value is too big");
2959         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2960                 return rte_flow_error_set(error, ENOTSUP,
2961                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2962                                           "set VLAN PCP action must follow "
2963                                           "the push VLAN action");
2964         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2965                 return rte_flow_error_set(error, ENOTSUP,
2966                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2967                                           "Multiple VLAN PCP modifications are "
2968                                           "not supported");
2969         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2970                 return rte_flow_error_set(error, EINVAL,
2971                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2972                                           "wrong action order, port_id should "
2973                                           "be after set VLAN PCP");
2974         return 0;
2975 }
2976
2977 /**
2978  * Validate the set VLAN VID.
2979  *
2980  * @param[in] item_flags
2981  *   Holds the items detected in this rule.
2982  * @param[in] action_flags
2983  *   Holds the actions detected until now.
2984  * @param[in] actions
2985  *   Pointer to the list of actions remaining in the flow rule.
2986  * @param[out] error
2987  *   Pointer to error structure.
2988  *
2989  * @return
2990  *   0 on success, a negative errno value otherwise and rte_errno is set.
2991  */
2992 static int
2993 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2994                                      uint64_t action_flags,
2995                                      const struct rte_flow_action actions[],
2996                                      struct rte_flow_error *error)
2997 {
2998         const struct rte_flow_action *action = actions;
2999         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
3000
3001         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
3002                 return rte_flow_error_set(error, EINVAL,
3003                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3004                                           "VLAN VID value is too big");
3005         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3006             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3007                 return rte_flow_error_set(error, ENOTSUP,
3008                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3009                                           "set VLAN VID action must follow push"
3010                                           " VLAN action or match on VLAN item");
3011         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3012                 return rte_flow_error_set(error, ENOTSUP,
3013                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3014                                           "Multiple VLAN VID modifications are "
3015                                           "not supported");
3016         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3017                 return rte_flow_error_set(error, EINVAL,
3018                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3019                                           "wrong action order, port_id should "
3020                                           "be after set VLAN VID");
3021         return 0;
3022 }
3023
3024 /**
3025  * Validate the FLAG action.
3026  *
3027  * @param[in] dev
3028  *   Pointer to the rte_eth_dev structure.
3029  * @param[in] action_flags
3030  *   Holds the actions detected until now.
3031  * @param[in] attr
3032  *   Pointer to flow attributes
3033  * @param[out] error
3034  *   Pointer to error structure.
3035  *
3036  * @return
3037  *   0 on success, a negative errno value otherwise and rte_errno is set.
3038  */
3039 static int
3040 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3041                              uint64_t action_flags,
3042                              const struct rte_flow_attr *attr,
3043                              struct rte_flow_error *error)
3044 {
3045         struct mlx5_priv *priv = dev->data->dev_private;
3046         struct mlx5_dev_config *config = &priv->config;
3047         int ret;
3048
3049         /* Fall back if no extended metadata register support. */
3050         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3051                 return mlx5_flow_validate_action_flag(action_flags, attr,
3052                                                       error);
3053         /* Extensive metadata mode requires registers. */
3054         if (!mlx5_flow_ext_mreg_supported(dev))
3055                 return rte_flow_error_set(error, ENOTSUP,
3056                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3057                                           "no metadata registers "
3058                                           "to support flag action");
3059         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3060                 return rte_flow_error_set(error, ENOTSUP,
3061                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3062                                           "extended metadata register"
3063                                           " isn't available");
3064         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3065         if (ret < 0)
3066                 return ret;
3067         MLX5_ASSERT(ret > 0);
3068         if (action_flags & MLX5_FLOW_ACTION_MARK)
3069                 return rte_flow_error_set(error, EINVAL,
3070                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3071                                           "can't mark and flag in same flow");
3072         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3073                 return rte_flow_error_set(error, EINVAL,
3074                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3075                                           "can't have 2 flag"
3076                                           " actions in same flow");
3077         return 0;
3078 }
3079
3080 /**
3081  * Validate MARK action.
3082  *
3083  * @param[in] dev
3084  *   Pointer to the rte_eth_dev structure.
3085  * @param[in] action
3086  *   Pointer to action.
3087  * @param[in] action_flags
3088  *   Holds the actions detected until now.
3089  * @param[in] attr
3090  *   Pointer to flow attributes
3091  * @param[out] error
3092  *   Pointer to error structure.
3093  *
3094  * @return
3095  *   0 on success, a negative errno value otherwise and rte_errno is set.
3096  */
3097 static int
3098 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3099                              const struct rte_flow_action *action,
3100                              uint64_t action_flags,
3101                              const struct rte_flow_attr *attr,
3102                              struct rte_flow_error *error)
3103 {
3104         struct mlx5_priv *priv = dev->data->dev_private;
3105         struct mlx5_dev_config *config = &priv->config;
3106         const struct rte_flow_action_mark *mark = action->conf;
3107         int ret;
3108
3109         if (is_tunnel_offload_active(dev))
3110                 return rte_flow_error_set(error, ENOTSUP,
3111                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3112                                           "no mark action "
3113                                           "if tunnel offload active");
3114         /* Fall back if no extended metadata register support. */
3115         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3116                 return mlx5_flow_validate_action_mark(action, action_flags,
3117                                                       attr, error);
3118         /* Extensive metadata mode requires registers. */
3119         if (!mlx5_flow_ext_mreg_supported(dev))
3120                 return rte_flow_error_set(error, ENOTSUP,
3121                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3122                                           "no metadata registers "
3123                                           "to support mark action");
3124         if (!priv->sh->dv_mark_mask)
3125                 return rte_flow_error_set(error, ENOTSUP,
3126                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3127                                           "extended metadata register"
3128                                           " isn't available");
3129         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3130         if (ret < 0)
3131                 return ret;
3132         MLX5_ASSERT(ret > 0);
3133         if (!mark)
3134                 return rte_flow_error_set(error, EINVAL,
3135                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3136                                           "configuration cannot be null");
3137         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3138                 return rte_flow_error_set(error, EINVAL,
3139                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3140                                           &mark->id,
3141                                           "mark id exceeds the limit");
3142         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3143                 return rte_flow_error_set(error, EINVAL,
3144                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3145                                           "can't flag and mark in same flow");
3146         if (action_flags & MLX5_FLOW_ACTION_MARK)
3147                 return rte_flow_error_set(error, EINVAL,
3148                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3149                                           "can't have 2 mark actions in same"
3150                                           " flow");
3151         return 0;
3152 }
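
/*
 * Editor's illustrative sketch, not compiled into the driver: a mark
 * action the validator above accepts -- a single MARK per flow (no
 * FLAG) with an id below the device mark limit:
 *
 *     struct rte_flow_action_mark mark = { .id = 0xbeef };
 *     const struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_MARK,
 *             .conf = &mark,
 *     };
 */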
3153
3154 /**
3155  * Validate SET_META action.
3156  *
3157  * @param[in] dev
3158  *   Pointer to the rte_eth_dev structure.
3159  * @param[in] action
3160  *   Pointer to the action structure.
3161  * @param[in] action_flags
3162  *   Holds the actions detected until now.
3163  * @param[in] attr
3164  *   Pointer to flow attributes
3165  * @param[out] error
3166  *   Pointer to error structure.
3167  *
3168  * @return
3169  *   0 on success, a negative errno value otherwise and rte_errno is set.
3170  */
3171 static int
3172 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3173                                  const struct rte_flow_action *action,
3174                                  uint64_t action_flags __rte_unused,
3175                                  const struct rte_flow_attr *attr,
3176                                  struct rte_flow_error *error)
3177 {
3178         struct mlx5_priv *priv = dev->data->dev_private;
3179         struct mlx5_dev_config *config = &priv->config;
3180         const struct rte_flow_action_set_meta *conf;
3181         uint32_t nic_mask = UINT32_MAX;
3182         int reg;
3183
3184         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
3185             !mlx5_flow_ext_mreg_supported(dev))
3186                 return rte_flow_error_set(error, ENOTSUP,
3187                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3188                                           "extended metadata register"
3189                                           " isn't supported");
3190         reg = flow_dv_get_metadata_reg(dev, attr, error);
3191         if (reg < 0)
3192                 return reg;
3193         if (reg == REG_NON)
3194                 return rte_flow_error_set(error, ENOTSUP,
3195                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3196                                           "unavailable extended metadata register");
3197         if (reg != REG_A && reg != REG_B)
3200                 nic_mask = priv->sh->dv_meta_mask;
3202         if (!(action->conf))
3203                 return rte_flow_error_set(error, EINVAL,
3204                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3205                                           "configuration cannot be null");
3206         conf = (const struct rte_flow_action_set_meta *)action->conf;
3207         if (!conf->mask)
3208                 return rte_flow_error_set(error, EINVAL,
3209                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3210                                           "zero mask doesn't have any effect");
3211         if (conf->mask & ~nic_mask)
3212                 return rte_flow_error_set(error, EINVAL,
3213                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3214                                           "metadata must be within reg C0");
3215         return 0;
3216 }
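
/*
 * Editor's illustrative sketch, not compiled into the driver: a
 * SET_META configuration the validator above accepts, assuming the
 * device's metadata register exposes at least these 16 bits -- the
 * mask must be non-zero and stay within the register mask:
 *
 *     struct rte_flow_action_set_meta meta = {
 *             .data = 0x1234,
 *             .mask = 0xffff,
 *     };
 *     const struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_SET_META,
 *             .conf = &meta,
 *     };
 */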
3217
3218 /**
3219  * Validate SET_TAG action.
3220  *
3221  * @param[in] dev
3222  *   Pointer to the rte_eth_dev structure.
3223  * @param[in] action
3224  *   Pointer to the action structure.
3225  * @param[in] action_flags
3226  *   Holds the actions detected until now.
3227  * @param[in] attr
3228  *   Pointer to flow attributes
3229  * @param[out] error
3230  *   Pointer to error structure.
3231  *
3232  * @return
3233  *   0 on success, a negative errno value otherwise and rte_errno is set.
3234  */
3235 static int
3236 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3237                                 const struct rte_flow_action *action,
3238                                 uint64_t action_flags,
3239                                 const struct rte_flow_attr *attr,
3240                                 struct rte_flow_error *error)
3241 {
3242         const struct rte_flow_action_set_tag *conf;
3243         const uint64_t terminal_action_flags =
3244                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3245                 MLX5_FLOW_ACTION_RSS;
3246         int ret;
3247
3248         if (!mlx5_flow_ext_mreg_supported(dev))
3249                 return rte_flow_error_set(error, ENOTSUP,
3250                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3251                                           "extended metadata register"
3252                                           " isn't supported");
3253         if (!(action->conf))
3254                 return rte_flow_error_set(error, EINVAL,
3255                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3256                                           "configuration cannot be null");
3257         conf = (const struct rte_flow_action_set_tag *)action->conf;
3258         if (!conf->mask)
3259                 return rte_flow_error_set(error, EINVAL,
3260                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3261                                           "zero mask doesn't have any effect");
3262         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3263         if (ret < 0)
3264                 return ret;
3265         if (!attr->transfer && attr->ingress &&
3266             (action_flags & terminal_action_flags))
3267                 return rte_flow_error_set(error, EINVAL,
3268                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3269                                           "set_tag has no effect"
3270                                           " with terminal actions");
3271         return 0;
3272 }
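
/*
 * Editor's illustrative sketch, not compiled into the driver: a
 * SET_TAG configuration the validator above accepts when it precedes
 * any terminal (drop/queue/RSS) action in the list:
 *
 *     struct rte_flow_action_set_tag tag = {
 *             .data = 0xcafe,
 *             .mask = 0xffff,
 *             .index = 0,
 *     };
 *     const struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_SET_TAG,
 *             .conf = &tag,
 *     };
 */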
3273
3274 /**
3275  * Validate count action.
3276  *
3277  * @param[in] dev
3278  *   Pointer to rte_eth_dev structure.
3279  * @param[in] shared
3280  *   Indicator if action is shared.
3281  * @param[in] action_flags
3282  *   Holds the actions detected until now.
3283  * @param[out] error
3284  *   Pointer to error structure.
3285  *
3286  * @return
3287  *   0 on success, a negative errno value otherwise and rte_errno is set.
3288  */
3289 static int
3290 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3291                               uint64_t action_flags,
3292                               struct rte_flow_error *error)
3293 {
3294         struct mlx5_priv *priv = dev->data->dev_private;
3295
3296         if (!priv->sh->devx)
3297                 goto notsup_err;
3298         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3299                 return rte_flow_error_set(error, EINVAL,
3300                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3301                                           "duplicate count actions set");
3302         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3303             !priv->sh->flow_hit_aso_en)
3304                 return rte_flow_error_set(error, EINVAL,
3305                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3306                                           "combining old age and shared count actions is not supported");
3307 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3308         return 0;
3309 #endif
3310 notsup_err:
3311         return rte_flow_error_set
3312                       (error, ENOTSUP,
3313                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3314                        NULL,
3315                        "count action not supported");
3316 }
3317
3318 /**
3319  * Validate the L2 encap action.
3320  *
3321  * @param[in] dev
3322  *   Pointer to the rte_eth_dev structure.
3323  * @param[in] action_flags
3324  *   Holds the actions detected until now.
3325  * @param[in] action
3326  *   Pointer to the action structure.
3327  * @param[in] attr
3328  *   Pointer to flow attributes.
3329  * @param[out] error
3330  *   Pointer to error structure.
3331  *
3332  * @return
3333  *   0 on success, a negative errno value otherwise and rte_errno is set.
3334  */
3335 static int
3336 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3337                                  uint64_t action_flags,
3338                                  const struct rte_flow_action *action,
3339                                  const struct rte_flow_attr *attr,
3340                                  struct rte_flow_error *error)
3341 {
3342         const struct mlx5_priv *priv = dev->data->dev_private;
3343
3344         if (!(action->conf))
3345                 return rte_flow_error_set(error, EINVAL,
3346                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3347                                           "configuration cannot be null");
3348         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3349                 return rte_flow_error_set(error, EINVAL,
3350                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3351                                           "can only have a single encap action "
3352                                           "in a flow");
3353         if (!attr->transfer && priv->representor)
3354                 return rte_flow_error_set(error, ENOTSUP,
3355                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3356                                           "encap action for VF representor "
3357                                           "not supported on NIC table");
3358         return 0;
3359 }
3360
3361 /**
3362  * Validate a decap action.
3363  *
3364  * @param[in] dev
3365  *   Pointer to the rte_eth_dev structure.
3366  * @param[in] action_flags
3367  *   Holds the actions detected until now.
3368  * @param[in] action
3369  *   Pointer to the action structure.
3370  * @param[in] item_flags
3371  *   Holds the items detected.
3372  * @param[in] attr
3373  *   Pointer to flow attributes
3374  * @param[out] error
3375  *   Pointer to error structure.
3376  *
3377  * @return
3378  *   0 on success, a negative errno value otherwise and rte_errno is set.
3379  */
3380 static int
3381 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3382                               uint64_t action_flags,
3383                               const struct rte_flow_action *action,
3384                               const uint64_t item_flags,
3385                               const struct rte_flow_attr *attr,
3386                               struct rte_flow_error *error)
3387 {
3388         const struct mlx5_priv *priv = dev->data->dev_private;
3389
3390         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3391             !priv->config.decap_en)
3392                 return rte_flow_error_set(error, ENOTSUP,
3393                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3394                                           "decap is not enabled");
3395         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3396                 return rte_flow_error_set(error, ENOTSUP,
3397                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3398                                           action_flags &
3399                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3400                                           "have a single decap action" : "decap "
3401                                           "after encap is not supported");
3402         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3403                 return rte_flow_error_set(error, EINVAL,
3404                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3405                                           "can't have decap action after"
3406                                           " modify action");
3407         if (attr->egress)
3408                 return rte_flow_error_set(error, ENOTSUP,
3409                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3410                                           NULL,
3411                                           "decap action not supported for "
3412                                           "egress");
3413         if (!attr->transfer && priv->representor)
3414                 return rte_flow_error_set(error, ENOTSUP,
3415                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3416                                           "decap action for VF representor "
3417                                           "not supported on NIC table");
3418         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3419             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3420                 return rte_flow_error_set(error, ENOTSUP,
3421                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3422                                 "VXLAN item should be present for VXLAN decap");
3423         return 0;
3424 }
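
/*
 * Editor's illustrative sketch, not compiled into the driver: a VXLAN
 * decap accepted by the validator above -- the rule is not on egress
 * and the pattern carries a VXLAN item. "queue" stands for a
 * hypothetical struct rte_flow_action_queue configuration:
 *
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */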
3425
3426 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3427
3428 /**
3429  * Validate the raw encap and decap actions.
3430  *
3431  * @param[in] dev
3432  *   Pointer to the rte_eth_dev structure.
3433  * @param[in] decap
3434  *   Pointer to the decap action.
3435  * @param[in] encap
3436  *   Pointer to the encap action.
3437  * @param[in] attr
3438  *   Pointer to flow attributes
3439  * @param[in, out] action_flags
3440  *   Holds the actions detected until now.
3441  * @param[out] actions_n
3442  *   Pointer to the actions counter.
3443  * @param[in] action
3444  *   Pointer to the action structure.
3445  * @param[in] item_flags
3446  *   Holds the items detected.
3447  * @param[out] error
3448  *   Pointer to error structure.
3449  *
3450  * @return
3451  *   0 on success, a negative errno value otherwise and rte_errno is set.
3452  */
3453 static int
3454 flow_dv_validate_action_raw_encap_decap
3455         (struct rte_eth_dev *dev,
3456          const struct rte_flow_action_raw_decap *decap,
3457          const struct rte_flow_action_raw_encap *encap,
3458          const struct rte_flow_attr *attr, uint64_t *action_flags,
3459          int *actions_n, const struct rte_flow_action *action,
3460          uint64_t item_flags, struct rte_flow_error *error)
3461 {
3462         const struct mlx5_priv *priv = dev->data->dev_private;
3463         int ret;
3464
3465         if (encap && (!encap->size || !encap->data))
3466                 return rte_flow_error_set(error, EINVAL,
3467                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3468                                           "raw encap data cannot be empty");
3469         if (decap && encap) {
3470                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3471                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3472                         /* L3 encap. */
3473                         decap = NULL;
3474                 else if (encap->size <=
3475                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3476                            decap->size >
3477                            MLX5_ENCAPSULATION_DECISION_SIZE)
3478                         /* L3 decap. */
3479                         encap = NULL;
3480                 else if (encap->size >
3481                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3482                            decap->size >
3483                            MLX5_ENCAPSULATION_DECISION_SIZE)
3484                         /* 2 L2 actions: encap and decap. */
3485                         ;
3486                 else
3487                         return rte_flow_error_set(error,
3488                                 ENOTSUP,
3489                                 RTE_FLOW_ERROR_TYPE_ACTION,
3490                                 NULL, "unsupported combination: "
3491                                 "raw decap and raw encap sizes "
3492                                 "are both too small");
3493         }
3494         if (decap) {
3495                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3496                                                     item_flags, attr, error);
3497                 if (ret < 0)
3498                         return ret;
3499                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3500                 ++(*actions_n);
3501         }
3502         if (encap) {
3503                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3504                         return rte_flow_error_set(error, ENOTSUP,
3505                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3506                                                   NULL,
3507                                                   "small raw encap size");
3508                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3509                         return rte_flow_error_set(error, EINVAL,
3510                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3511                                                   NULL,
3512                                                   "more than one encap action");
3513                 if (!attr->transfer && priv->representor)
3514                         return rte_flow_error_set
3515                                         (error, ENOTSUP,
3516                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3517                                          "encap action for VF representor "
3518                                          "not supported on NIC table");
3519                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3520                 ++(*actions_n);
3521         }
3522         return 0;
3523 }
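
/*
 * Editor's illustrative sketch, not compiled into the driver:
 * MLX5_ENCAPSULATION_DECISION_SIZE splits L2 from L3 reformats above.
 * A raw decap of the full matched outer headers (larger than the
 * decision size) combined with a raw encap of a bare L2 header (not
 * larger) is classified as an L3 decap. "outer_hdrs" is a hypothetical
 * application buffer:
 *
 *     uint8_t l2_hdr[RTE_ETHER_HDR_LEN];
 *     struct rte_flow_action_raw_decap decap = {
 *             .data = outer_hdrs,
 *             .size = sizeof(outer_hdrs),
 *     };
 *     struct rte_flow_action_raw_encap encap = {
 *             .data = l2_hdr,
 *             .size = sizeof(l2_hdr),
 *     };
 */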
3524
3525 /**
3526  * Validate the ASO CT action.
3527  *
3528  * @param[in] dev
3529  *   Pointer to the rte_eth_dev structure.
3530  * @param[in] action_flags
3531  *   Holds the actions detected until now.
3532  * @param[in] item_flags
3533  *   The items found in this flow rule.
3534  * @param[in] attr
3535  *   Pointer to flow attributes.
3536  * @param[out] error
3537  *   Pointer to error structure.
3538  *
3539  * @return
3540  *   0 on success, a negative errno value otherwise and rte_errno is set.
3541  */
3542 static int
3543 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3544                                uint64_t action_flags,
3545                                uint64_t item_flags,
3546                                const struct rte_flow_attr *attr,
3547                                struct rte_flow_error *error)
3548 {
3549         RTE_SET_USED(dev);
3550
3551         if (attr->group == 0 && !attr->transfer)
3552                 return rte_flow_error_set(error, ENOTSUP,
3553                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3554                                           NULL,
3555                                           "Only non-root tables are supported");
3556         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3557                 return rte_flow_error_set(error, ENOTSUP,
3558                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3559                                           "CT cannot follow a fate action");
3560         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3561             (action_flags & MLX5_FLOW_ACTION_AGE))
3562                 return rte_flow_error_set(error, EINVAL,
3563                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3564                                           "Only one ASO action is supported");
3565         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3566                 return rte_flow_error_set(error, EINVAL,
3567                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3568                                           "Encap cannot exist before CT");
3569         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3570                 return rte_flow_error_set(error, EINVAL,
3571                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3572                                           "Not an outer TCP packet");
3573         return 0;
3574 }
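
/*
 * Editor's illustrative sketch, not compiled into the driver: the CT
 * action is accepted only on a non-root table, before any fate action,
 * with an outer TCP match in the pattern -- e.g. a group-1 ingress
 * rule. "ct_conf" stands for the hypothetical conntrack configuration
 * prepared by the application and "queue" for a queue action conf:
 *
 *     struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_CONNTRACK, .conf = &ct_conf },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */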
3575
3576 int
3577 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3578                              struct mlx5_list_entry *entry, void *cb_ctx)
3579 {
3580         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3581         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3582         struct mlx5_flow_dv_encap_decap_resource *resource;
3583
3584         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3585                                 entry);
3586         if (resource->reformat_type == ctx_resource->reformat_type &&
3587             resource->ft_type == ctx_resource->ft_type &&
3588             resource->flags == ctx_resource->flags &&
3589             resource->size == ctx_resource->size &&
3590             !memcmp((const void *)resource->buf,
3591                     (const void *)ctx_resource->buf,
3592                     resource->size))
3593                 return 0;
3594         return -1;
3595 }
3596
3597 struct mlx5_list_entry *
3598 flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
3599 {
3600         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3601         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3602         struct mlx5dv_dr_domain *domain;
3603         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3604         struct mlx5_flow_dv_encap_decap_resource *resource;
3605         uint32_t idx;
3606         int ret;
3607
3608         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3609                 domain = sh->fdb_domain;
3610         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3611                 domain = sh->rx_domain;
3612         else
3613                 domain = sh->tx_domain;
3614         /* Register new encap/decap resource. */
3615         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3616         if (!resource) {
3617                 rte_flow_error_set(ctx->error, ENOMEM,
3618                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3619                                    "cannot allocate resource memory");
3620                 return NULL;
3621         }
3622         *resource = *ctx_resource;
3623         resource->idx = idx;
3624         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
3625                                                               domain, resource,
3626                                                              &resource->action);
3627         if (ret) {
3628                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3629                 rte_flow_error_set(ctx->error, ENOMEM,
3630                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3631                                    NULL, "cannot create action");
3632                 return NULL;
3633         }
3634
3635         return &resource->entry;
3636 }
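
/*
 * The reformat action is created on the steering domain matching the flow
 * table type: FDB for transfer, NIC RX for ingress, NIC TX for egress. The
 * resulting action handle lives as long as the ipool entry and is released
 * by the corresponding remove callback.
 */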
3637
3638 struct mlx5_list_entry *
3639 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3640                              void *cb_ctx)
3641 {
3642         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3643         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3644         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3645         uint32_t idx;
3646
3647         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3648                                            &idx);
3649         if (!cache_resource) {
3650                 rte_flow_error_set(ctx->error, ENOMEM,
3651                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3652                                    "cannot allocate resource memory");
3653                 return NULL;
3654         }
3655         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3656         cache_resource->idx = idx;
3657         return &cache_resource->entry;
3658 }
3659
3660 void
3661 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3662 {
3663         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3664         struct mlx5_flow_dv_encap_decap_resource *res =
3665                                        container_of(entry, typeof(*res), entry);
3666
3667         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3668 }
3669
3670 /**
3671  * Find existing encap/decap resource or create and register a new one.
3672  *
3673  * @param[in, out] dev
3674  *   Pointer to rte_eth_dev structure.
3675  * @param[in, out] resource
3676  *   Pointer to encap/decap resource.
3677  * @param[in, out] dev_flow
3678  *   Pointer to the dev_flow.
3679  * @param[out] error
3680  *   Pointer to error structure.
3681  *
3682  * @return
3683  *   0 on success, otherwise -errno and errno is set.
3684  */
3685 static int
3686 flow_dv_encap_decap_resource_register
3687                         (struct rte_eth_dev *dev,
3688                          struct mlx5_flow_dv_encap_decap_resource *resource,
3689                          struct mlx5_flow *dev_flow,
3690                          struct rte_flow_error *error)
3691 {
3692         struct mlx5_priv *priv = dev->data->dev_private;
3693         struct mlx5_dev_ctx_shared *sh = priv->sh;
3694         struct mlx5_list_entry *entry;
3695         union {
3696                 struct {
3697                         uint32_t ft_type:8;
3698                         uint32_t refmt_type:8;
3699                         /*
3700                          * Header reformat actions can be shared between
3701                          * non-root tables. One bit to indicate non-root
3702                          * table or not.
3703                          */
3704                         uint32_t is_root:1;
3705                         uint32_t reserve:15;
3706                 };
3707                 uint32_t v32;
3708         } encap_decap_key = {
3709                 {
3710                         .ft_type = resource->ft_type,
3711                         .refmt_type = resource->reformat_type,
3712                         .is_root = !!dev_flow->dv.group,
3713                         .reserve = 0,
3714                 }
3715         };
3716         struct mlx5_flow_cb_ctx ctx = {
3717                 .error = error,
3718                 .data = resource,
3719         };
3720         struct mlx5_hlist *encaps_decaps;
3721         uint64_t key64;
3722
3723         encaps_decaps = flow_dv_hlist_prepare(sh, &sh->encaps_decaps,
3724                                 "encaps_decaps",
3725                                 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
3726                                 true, true, sh,
3727                                 flow_dv_encap_decap_create_cb,
3728                                 flow_dv_encap_decap_match_cb,
3729                                 flow_dv_encap_decap_remove_cb,
3730                                 flow_dv_encap_decap_clone_cb,
3731                                 flow_dv_encap_decap_clone_free_cb);
3732         if (unlikely(!encaps_decaps))
3733                 return -rte_errno;
3734         resource->flags = dev_flow->dv.group ? 0 : 1;
3735         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3736                                 sizeof(encap_decap_key.v32), 0);
3737         if (resource->reformat_type !=
3738             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3739             resource->size)
3740                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3741         entry = mlx5_hlist_register(encaps_decaps, key64, &ctx);
3742         if (!entry)
3743                 return -rte_errno;
3744         resource = container_of(entry, typeof(*resource), entry);
3745         dev_flow->dv.encap_decap = resource;
3746         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3747         return 0;
3748 }
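
/*
 * The 64-bit key computed above only selects the hash bucket: it folds the
 * packed 32-bit key and (for encap types) the raw header buffer through the
 * raw Internet-checksum accumulator, so checksum collisions are possible and
 * harmless; exact equality is still established per entry by
 * flow_dv_encap_decap_match_cb() before an entry is reused.
 */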
3749
3750 /**
3751  * Find existing table jump resource or create and register a new one.
3752  *
3753  * @param[in, out] dev
3754  *   Pointer to rte_eth_dev structure.
3755  * @param[in, out] tbl
3756  *   Pointer to flow table resource.
3757  * @param[in, out] dev_flow
3758  *   Pointer to the dev_flow.
3759  * @param[out] error
3760  *   Pointer to error structure.
3761  *
3762  * @return
3763  *   0 on success, otherwise -errno and errno is set.
3764  */
3765 static int
3766 flow_dv_jump_tbl_resource_register
3767                         (struct rte_eth_dev *dev __rte_unused,
3768                          struct mlx5_flow_tbl_resource *tbl,
3769                          struct mlx5_flow *dev_flow,
3770                          struct rte_flow_error *error __rte_unused)
3771 {
3772         struct mlx5_flow_tbl_data_entry *tbl_data =
3773                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3774
3775         MLX5_ASSERT(tbl);
3776         MLX5_ASSERT(tbl_data->jump.action);
3777         dev_flow->handle->rix_jump = tbl_data->idx;
3778         dev_flow->dv.jump = &tbl_data->jump;
3779         return 0;
3780 }
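
/*
 * No allocation is needed here: the jump action is created together with the
 * destination table entry, so registration only records the table index and
 * action pointer in the device flow.
 */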
3781
3782 int
3783 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3784                          struct mlx5_list_entry *entry, void *cb_ctx)
3785 {
3786         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3787         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3788         struct mlx5_flow_dv_port_id_action_resource *res =
3789                                        container_of(entry, typeof(*res), entry);
3790
3791         return ref->port_id != res->port_id;
3792 }
3793
3794 struct mlx5_list_entry *
3795 flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
3796 {
3797         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3798         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3799         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3800         struct mlx5_flow_dv_port_id_action_resource *resource;
3801         uint32_t idx;
3802         int ret;
3803
3804         /* Register new port id action resource. */
3805         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3806         if (!resource) {
3807                 rte_flow_error_set(ctx->error, ENOMEM,
3808                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3809                                    "cannot allocate port_id action memory");
3810                 return NULL;
3811         }
3812         *resource = *ref;
3813         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3814                                                         ref->port_id,
3815                                                         &resource->action);
3816         if (ret) {
3817                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3818                 rte_flow_error_set(ctx->error, ENOMEM,
3819                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3820                                    "cannot create action");
3821                 return NULL;
3822         }
3823         resource->idx = idx;
3824         return &resource->entry;
3825 }
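
/*
 * Destination-port actions exist only in the E-Switch (FDB) domain, hence
 * the unconditional use of sh->fdb_domain above; PORT_ID is not a valid fate
 * action for plain NIC RX/TX flows.
 */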
3826
3827 struct mlx5_list_entry *
3828 flow_dv_port_id_clone_cb(void *tool_ctx,
3829                          struct mlx5_list_entry *entry __rte_unused,
3830                          void *cb_ctx)
3831 {
3832         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3833         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3834         struct mlx5_flow_dv_port_id_action_resource *resource;
3835         uint32_t idx;
3836
3837         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3838         if (!resource) {
3839                 rte_flow_error_set(ctx->error, ENOMEM,
3840                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3841                                    "cannot allocate port_id action memory");
3842                 return NULL;
3843         }
3844         memcpy(resource, entry, sizeof(*resource));
3845         resource->idx = idx;
3846         return &resource->entry;
3847 }
3848
3849 void
3850 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3851 {
3852         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3853         struct mlx5_flow_dv_port_id_action_resource *resource =
3854                                   container_of(entry, typeof(*resource), entry);
3855
3856         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3857 }
3858
3859 /**
3860  * Find existing table port ID resource or create and register a new one.
3861  *
3862  * @param[in, out] dev
3863  *   Pointer to rte_eth_dev structure.
3864  * @param[in, out] ref
3865  *   Pointer to port ID action resource reference.
3866  * @param[in, out] dev_flow
3867  *   Pointer to the dev_flow.
3868  * @param[out] error
3869  *   Pointer to error structure.
3870  *
3871  * @return
3872  *   0 on success, otherwise -errno and errno is set.
3873  */
3874 static int
3875 flow_dv_port_id_action_resource_register
3876                         (struct rte_eth_dev *dev,
3877                          struct mlx5_flow_dv_port_id_action_resource *ref,
3878                          struct mlx5_flow *dev_flow,
3879                          struct rte_flow_error *error)
3880 {
3881         struct mlx5_priv *priv = dev->data->dev_private;
3882         struct mlx5_list_entry *entry;
3883         struct mlx5_flow_dv_port_id_action_resource *resource;
3884         struct mlx5_flow_cb_ctx ctx = {
3885                 .error = error,
3886                 .data = ref,
3887         };
3888
3889         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3890         if (!entry)
3891                 return -rte_errno;
3892         resource = container_of(entry, typeof(*resource), entry);
3893         dev_flow->dv.port_id_action = resource;
3894         dev_flow->handle->rix_port_id_action = resource->idx;
3895         return 0;
3896 }
3897
3898 int
3899 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3900                            struct mlx5_list_entry *entry, void *cb_ctx)
3901 {
3902         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3903         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3904         struct mlx5_flow_dv_push_vlan_action_resource *res =
3905                                        container_of(entry, typeof(*res), entry);
3906
3907         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3908 }
3909
3910 struct mlx5_list_entry *
3911 flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
3912 {
3913         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3914         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3915         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3916         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3917         struct mlx5dv_dr_domain *domain;
3918         uint32_t idx;
3919         int ret;
3920
3921         /* Register new push VLAN action resource. */
3922         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3923         if (!resource) {
3924                 rte_flow_error_set(ctx->error, ENOMEM,
3925                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3926                                    "cannot allocate push_vlan action memory");
3927                 return NULL;
3928         }
3929         *resource = *ref;
3930         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3931                 domain = sh->fdb_domain;
3932         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3933                 domain = sh->rx_domain;
3934         else
3935                 domain = sh->tx_domain;
3936         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3937                                                         &resource->action);
3938         if (ret) {
3939                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3940                 rte_flow_error_set(ctx->error, ENOMEM,
3941                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3942                                    "cannot create push vlan action");
3943                 return NULL;
3944         }
3945         resource->idx = idx;
3946         return &resource->entry;
3947 }
3948
3949 struct mlx5_list_entry *
3950 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3951                            struct mlx5_list_entry *entry __rte_unused,
3952                            void *cb_ctx)
3953 {
3954         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3955         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3956         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3957         uint32_t idx;
3958
3959         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3960         if (!resource) {
3961                 rte_flow_error_set(ctx->error, ENOMEM,
3962                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3963                                    "cannot allocate push_vlan action memory");
3964                 return NULL;
3965         }
3966         memcpy(resource, entry, sizeof(*resource));
3967         resource->idx = idx;
3968         return &resource->entry;
3969 }
3970
3971 void
3972 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3973 {
3974         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3975         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3976                                   container_of(entry, typeof(*resource), entry);
3977
3978         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3979 }
3980
3981 /**
3982  * Find existing push vlan resource or create and register a new one.
3983  *
3984  * @param[in, out] dev
3985  *   Pointer to rte_eth_dev structure.
3986  * @param[in, out] ref
3987  *   Pointer to push VLAN action resource reference.
3988  * @param[in, out] dev_flow
3989  *   Pointer to the dev_flow.
3990  * @param[out] error
3991  *   Pointer to error structure.
3992  *
3993  * @return
3994  *   0 on success, otherwise -errno and errno is set.
3995  */
3996 static int
3997 flow_dv_push_vlan_action_resource_register
3998                        (struct rte_eth_dev *dev,
3999                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
4000                         struct mlx5_flow *dev_flow,
4001                         struct rte_flow_error *error)
4002 {
4003         struct mlx5_priv *priv = dev->data->dev_private;
4004         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4005         struct mlx5_list_entry *entry;
4006         struct mlx5_flow_cb_ctx ctx = {
4007                 .error = error,
4008                 .data = ref,
4009         };
4010
4011         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4012         if (!entry)
4013                 return -rte_errno;
4014         resource = container_of(entry, typeof(*resource), entry);
4015
4016         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4017         dev_flow->dv.push_vlan_res = resource;
4018         return 0;
4019 }
4020
4021 /**
4022  * Get the header size of a specific rte_flow_item_type.
4023  *
4024  * @param[in] item_type
4025  *   Tested rte_flow_item_type.
4026  *
4027  * @return
4028  *   Size of the item type header, 0 if void or irrelevant.
4029  */
4030 static size_t
4031 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4032 {
4033         size_t retval;
4034
4035         switch (item_type) {
4036         case RTE_FLOW_ITEM_TYPE_ETH:
4037                 retval = sizeof(struct rte_ether_hdr);
4038                 break;
4039         case RTE_FLOW_ITEM_TYPE_VLAN:
4040                 retval = sizeof(struct rte_vlan_hdr);
4041                 break;
4042         case RTE_FLOW_ITEM_TYPE_IPV4:
4043                 retval = sizeof(struct rte_ipv4_hdr);
4044                 break;
4045         case RTE_FLOW_ITEM_TYPE_IPV6:
4046                 retval = sizeof(struct rte_ipv6_hdr);
4047                 break;
4048         case RTE_FLOW_ITEM_TYPE_UDP:
4049                 retval = sizeof(struct rte_udp_hdr);
4050                 break;
4051         case RTE_FLOW_ITEM_TYPE_TCP:
4052                 retval = sizeof(struct rte_tcp_hdr);
4053                 break;
4054         case RTE_FLOW_ITEM_TYPE_VXLAN:
4055         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4056                 retval = sizeof(struct rte_vxlan_hdr);
4057                 break;
4058         case RTE_FLOW_ITEM_TYPE_GRE:
4059         case RTE_FLOW_ITEM_TYPE_NVGRE:
4060                 retval = sizeof(struct rte_gre_hdr);
4061                 break;
4062         case RTE_FLOW_ITEM_TYPE_MPLS:
4063                 retval = sizeof(struct rte_mpls_hdr);
4064                 break;
4065         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4066         default:
4067                 retval = 0;
4068                 break;
4069         }
4070         return retval;
4071 }
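
/*
 * Example: for a VXLAN encapsulation pattern of ETH / IPV4 / UDP / VXLAN
 * the accumulated header length resolved by this helper is
 * 14 (ether) + 20 (ipv4) + 8 (udp) + 8 (vxlan) = 50 bytes, which must not
 * exceed MLX5_ENCAP_MAX_LEN when the buffer is assembled.
 */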
4072
4073 #define MLX5_ENCAP_IPV4_VERSION         0x40
4074 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4075 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4076 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4077 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4078 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4079 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
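
/*
 * Defaults back-filled into user-supplied encap headers when left zero:
 * IPv4 version/IHL 0x45 (20-byte header) and TTL 64, IPv6 version 6 with
 * zero traffic class/flow label and hop limit 255, the VXLAN "I" flag
 * (VNI valid) and the VXLAN-GPE "P" flag (next protocol present).
 */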
4080
4081 /**
4082  * Convert the encap action data from list of rte_flow_item to raw buffer
4083  *
4084  * @param[in] items
4085  *   Pointer to rte_flow_item objects list.
4086  * @param[out] buf
4087  *   Pointer to the output buffer.
4088  * @param[out] size
4089  *   Pointer to the output buffer size.
4090  * @param[out] error
4091  *   Pointer to the error structure.
4092  *
4093  * @return
4094  *   0 on success, a negative errno value otherwise and rte_errno is set.
4095  */
4096 static int
4097 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4098                            size_t *size, struct rte_flow_error *error)
4099 {
4100         struct rte_ether_hdr *eth = NULL;
4101         struct rte_vlan_hdr *vlan = NULL;
4102         struct rte_ipv4_hdr *ipv4 = NULL;
4103         struct rte_ipv6_hdr *ipv6 = NULL;
4104         struct rte_udp_hdr *udp = NULL;
4105         struct rte_vxlan_hdr *vxlan = NULL;
4106         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4107         struct rte_gre_hdr *gre = NULL;
4108         size_t len;
4109         size_t temp_size = 0;
4110
4111         if (!items)
4112                 return rte_flow_error_set(error, EINVAL,
4113                                           RTE_FLOW_ERROR_TYPE_ACTION,
4114                                           NULL, "invalid empty data");
4115         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4116                 len = flow_dv_get_item_hdr_len(items->type);
4117                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4118                         return rte_flow_error_set(error, EINVAL,
4119                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4120                                                   (void *)items->type,
4121                                                   "items total size is too big"
4122                                                   " for encap action");
4123                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4124                 switch (items->type) {
4125                 case RTE_FLOW_ITEM_TYPE_ETH:
4126                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4127                         break;
4128                 case RTE_FLOW_ITEM_TYPE_VLAN:
4129                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4130                         if (!eth)
4131                                 return rte_flow_error_set(error, EINVAL,
4132                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4133                                                 (void *)items->type,
4134                                                 "eth header not found");
4135                         if (!eth->ether_type)
4136                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4137                         break;
4138                 case RTE_FLOW_ITEM_TYPE_IPV4:
4139                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4140                         if (!vlan && !eth)
4141                                 return rte_flow_error_set(error, EINVAL,
4142                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4143                                                 (void *)items->type,
4144                                                 "neither eth nor vlan"
4145                                                 " header found");
4146                         if (vlan && !vlan->eth_proto)
4147                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4148                         else if (eth && !eth->ether_type)
4149                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4150                         if (!ipv4->version_ihl)
4151                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4152                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4153                         if (!ipv4->time_to_live)
4154                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4155                         break;
4156                 case RTE_FLOW_ITEM_TYPE_IPV6:
4157                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4158                         if (!vlan && !eth)
4159                                 return rte_flow_error_set(error, EINVAL,
4160                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4161                                                 (void *)items->type,
4162                                                 "neither eth nor vlan"
4163                                                 " header found");
4164                         if (vlan && !vlan->eth_proto)
4165                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4166                         else if (eth && !eth->ether_type)
4167                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4168                         if (!ipv6->vtc_flow)
4169                                 ipv6->vtc_flow =
4170                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4171                         if (!ipv6->hop_limits)
4172                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4173                         break;
4174                 case RTE_FLOW_ITEM_TYPE_UDP:
4175                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4176                         if (!ipv4 && !ipv6)
4177                                 return rte_flow_error_set(error, EINVAL,
4178                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4179                                                 (void *)items->type,
4180                                                 "ip header not found");
4181                         if (ipv4 && !ipv4->next_proto_id)
4182                                 ipv4->next_proto_id = IPPROTO_UDP;
4183                         else if (ipv6 && !ipv6->proto)
4184                                 ipv6->proto = IPPROTO_UDP;
4185                         break;
4186                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4187                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4188                         if (!udp)
4189                                 return rte_flow_error_set(error, EINVAL,
4190                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4191                                                 (void *)items->type,
4192                                                 "udp header not found");
4193                         if (!udp->dst_port)
4194                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4195                         if (!vxlan->vx_flags)
4196                                 vxlan->vx_flags =
4197                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4198                         break;
4199                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4200                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4201                         if (!udp)
4202                                 return rte_flow_error_set(error, EINVAL,
4203                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4204                                                 (void *)items->type,
4205                                                 "udp header not found");
4206                         if (!vxlan_gpe->proto)
4207                                 return rte_flow_error_set(error, EINVAL,
4208                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4209                                                 (void *)items->type,
4210                                                 "next protocol not found");
4211                         if (!udp->dst_port)
4212                                 udp->dst_port =
4213                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4214                         if (!vxlan_gpe->vx_flags)
4215                                 vxlan_gpe->vx_flags =
4216                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4217                         break;
4218                 case RTE_FLOW_ITEM_TYPE_GRE:
4219                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4220                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4221                         if (!gre->proto)
4222                                 return rte_flow_error_set(error, EINVAL,
4223                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4224                                                 (void *)items->type,
4225                                                 "next protocol not found");
4226                         if (!ipv4 && !ipv6)
4227                                 return rte_flow_error_set(error, EINVAL,
4228                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4229                                                 (void *)items->type,
4230                                                 "ip header not found");
4231                         if (ipv4 && !ipv4->next_proto_id)
4232                                 ipv4->next_proto_id = IPPROTO_GRE;
4233                         else if (ipv6 && !ipv6->proto)
4234                                 ipv6->proto = IPPROTO_GRE;
4235                         break;
4236                 case RTE_FLOW_ITEM_TYPE_VOID:
4237                         break;
4238                 default:
4239                         return rte_flow_error_set(error, EINVAL,
4240                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4241                                                   (void *)items->type,
4242                                                   "unsupported item type");
4243                         break;
4244                 }
4245                 temp_size += len;
4246         }
4247         *size = temp_size;
4248         return 0;
4249 }
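
/*
 * Illustrative walk-through (values are examples only): for the item list
 * ETH / IPV4 / UDP / VXLAN / END with just MACs, IP addresses and VNI set,
 * the loop copies each spec into @buf and back-fills ether_type (IPv4),
 * the IPv4 version/IHL and TTL defaults, UDP destination port 4789 and the
 * VXLAN flags, returning the 50-byte total in @size.
 */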
4250
4251 static int
4252 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4253 {
4254         struct rte_ether_hdr *eth = NULL;
4255         struct rte_vlan_hdr *vlan = NULL;
4256         struct rte_ipv6_hdr *ipv6 = NULL;
4257         struct rte_udp_hdr *udp = NULL;
4258         char *next_hdr;
4259         uint16_t proto;
4260
4261         eth = (struct rte_ether_hdr *)data;
4262         next_hdr = (char *)(eth + 1);
4263         proto = RTE_BE16(eth->ether_type);
4264
4265         /* VLAN skipping */
4266         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4267                 vlan = (struct rte_vlan_hdr *)next_hdr;
4268                 proto = RTE_BE16(vlan->eth_proto);
4269                 next_hdr += sizeof(struct rte_vlan_hdr);
4270         }
4271
4272         /* HW calculates the IPv4 checksum, no need to proceed. */
4273         if (proto == RTE_ETHER_TYPE_IPV4)
4274                 return 0;
4275
4276         /* Non-IPv4/IPv6 header, not supported. */
4277         if (proto != RTE_ETHER_TYPE_IPV6) {
4278                 return rte_flow_error_set(error, ENOTSUP,
4279                                           RTE_FLOW_ERROR_TYPE_ACTION,
4280                                           NULL, "Cannot offload non IPv4/IPv6");
4281         }
4282
4283         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4284
4285         /* Ignore non-UDP packets. */
4286         if (ipv6->proto != IPPROTO_UDP)
4287                 return 0;
4288
4289         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4290         udp->dgram_cksum = 0;
4291
4292         return 0;
4293 }
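
/*
 * Rationale: the device recomputes the IPv4 header checksum of the pushed
 * header but not the UDP checksum over IPv6, so it is cleared here;
 * RFC 6935/6936 permit a zero UDP checksum for tunnels over IPv6.
 */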
4294
4295 /**
4296  * Convert L2 encap action to DV specification.
4297  *
4298  * @param[in] dev
4299  *   Pointer to rte_eth_dev structure.
4300  * @param[in] action
4301  *   Pointer to action structure.
4302  * @param[in, out] dev_flow
4303  *   Pointer to the mlx5_flow.
4304  * @param[in] transfer
4305  *   Mark if the flow is E-Switch flow.
4306  * @param[out] error
4307  *   Pointer to the error structure.
4308  *
4309  * @return
4310  *   0 on success, a negative errno value otherwise and rte_errno is set.
4311  */
4312 static int
4313 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4314                                const struct rte_flow_action *action,
4315                                struct mlx5_flow *dev_flow,
4316                                uint8_t transfer,
4317                                struct rte_flow_error *error)
4318 {
4319         const struct rte_flow_item *encap_data;
4320         const struct rte_flow_action_raw_encap *raw_encap_data;
4321         struct mlx5_flow_dv_encap_decap_resource res = {
4322                 .reformat_type =
4323                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4324                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4325                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4326         };
4327
4328         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4329                 raw_encap_data =
4330                         (const struct rte_flow_action_raw_encap *)action->conf;
4331                 res.size = raw_encap_data->size;
4332                 memcpy(res.buf, raw_encap_data->data, res.size);
4333         } else {
4334                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4335                         encap_data =
4336                                 ((const struct rte_flow_action_vxlan_encap *)
4337                                                 action->conf)->definition;
4338                 else
4339                         encap_data =
4340                                 ((const struct rte_flow_action_nvgre_encap *)
4341                                                 action->conf)->definition;
4342                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4343                                                &res.size, error))
4344                         return -rte_errno;
4345         }
4346         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4347                 return -rte_errno;
4348         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4349                 return rte_flow_error_set(error, EINVAL,
4350                                           RTE_FLOW_ERROR_TYPE_ACTION,
4351                                           NULL, "can't create L2 encap action");
4352         return 0;
4353 }
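
/*
 * All three encap flavors funnel through this helper: RAW_ENCAP copies the
 * user buffer verbatim, while VXLAN_ENCAP and NVGRE_ENCAP first serialize
 * their item list through flow_dv_convert_encap_data(). The result is a
 * shared L2-to-L2 tunnel reformat resource on the NIC TX or FDB domain.
 */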
4354
4355 /**
4356  * Convert L2 decap action to DV specification.
4357  *
4358  * @param[in] dev
4359  *   Pointer to rte_eth_dev structure.
4360  * @param[in, out] dev_flow
4361  *   Pointer to the mlx5_flow.
4362  * @param[in] transfer
4363  *   Mark if the flow is E-Switch flow.
4364  * @param[out] error
4365  *   Pointer to the error structure.
4366  *
4367  * @return
4368  *   0 on success, a negative errno value otherwise and rte_errno is set.
4369  */
4370 static int
4371 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4372                                struct mlx5_flow *dev_flow,
4373                                uint8_t transfer,
4374                                struct rte_flow_error *error)
4375 {
4376         struct mlx5_flow_dv_encap_decap_resource res = {
4377                 .size = 0,
4378                 .reformat_type =
4379                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4380                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4381                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4382         };
4383
4384         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4385                 return rte_flow_error_set(error, EINVAL,
4386                                           RTE_FLOW_ERROR_TYPE_ACTION,
4387                                           NULL, "can't create L2 decap action");
4388         return 0;
4389 }
4390
4391 /**
4392  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4393  *
4394  * @param[in] dev
4395  *   Pointer to rte_eth_dev structure.
4396  * @param[in] action
4397  *   Pointer to action structure.
4398  * @param[in, out] dev_flow
4399  *   Pointer to the mlx5_flow.
4400  * @param[in] attr
4401  *   Pointer to the flow attributes.
4402  * @param[out] error
4403  *   Pointer to the error structure.
4404  *
4405  * @return
4406  *   0 on success, a negative errno value otherwise and rte_errno is set.
4407  */
4408 static int
4409 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4410                                 const struct rte_flow_action *action,
4411                                 struct mlx5_flow *dev_flow,
4412                                 const struct rte_flow_attr *attr,
4413                                 struct rte_flow_error *error)
4414 {
4415         const struct rte_flow_action_raw_encap *encap_data;
4416         struct mlx5_flow_dv_encap_decap_resource res;
4417
4418         memset(&res, 0, sizeof(res));
4419         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4420         res.size = encap_data->size;
4421         memcpy(res.buf, encap_data->data, res.size);
4422         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4423                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4424                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4425         if (attr->transfer)
4426                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4427         else
4428                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4429                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4430         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4431                 return rte_flow_error_set(error, EINVAL,
4432                                           RTE_FLOW_ERROR_TYPE_ACTION,
4433                                           NULL, "can't create encap action");
4434         return 0;
4435 }
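
/*
 * The reformat type is inferred from the buffer length: a buffer shorter
 * than MLX5_ENCAPSULATION_DECISION_SIZE can only hold the L2 header to be
 * restored after stripping an L3 tunnel (decap direction), while anything
 * larger is treated as a full L2-to-L3 tunnel encapsulation.
 */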
4436
4437 /**
4438  * Create action push VLAN.
4439  *
4440  * @param[in] dev
4441  *   Pointer to rte_eth_dev structure.
4442  * @param[in] attr
4443  *   Pointer to the flow attributes.
4444  * @param[in] vlan
4445  *   Pointer to the vlan to push to the Ethernet header.
4446  * @param[in, out] dev_flow
4447  *   Pointer to the mlx5_flow.
4448  * @param[out] error
4449  *   Pointer to the error structure.
4450  *
4451  * @return
4452  *   0 on success, a negative errno value otherwise and rte_errno is set.
4453  */
4454 static int
4455 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4456                                 const struct rte_flow_attr *attr,
4457                                 const struct rte_vlan_hdr *vlan,
4458                                 struct mlx5_flow *dev_flow,
4459                                 struct rte_flow_error *error)
4460 {
4461         struct mlx5_flow_dv_push_vlan_action_resource res;
4462
4463         memset(&res, 0, sizeof(res));
4464         res.vlan_tag =
4465                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4466                                  vlan->vlan_tci);
4467         if (attr->transfer)
4468                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4469         else
4470                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4471                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4472         return flow_dv_push_vlan_action_resource_register
4473                                             (dev, &res, dev_flow, error);
4474 }
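
/*
 * Worked example (illustrative values): pushing TPID 0x8100 with PCP 3 and
 * VID 100 yields vlan_tci = (3 << 13) | 100 = 0x6064, so vlan_tag becomes
 * rte_cpu_to_be_32(0x8100 << 16 | 0x6064) and the device inserts the tag
 * bytes 81 00 60 64 after the source MAC address.
 */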
4475
4476 /**
4477  * Validate the modify-header actions.
4478  *
4479  * @param[in] action_flags
4480  *   Holds the actions detected until now.
4481  * @param[in] action
4482  *   Pointer to the modify action.
4483  * @param[out] error
4484  *   Pointer to error structure.
4485  *
4486  * @return
4487  *   0 on success, a negative errno value otherwise and rte_errno is set.
4488  */
4489 static int
4490 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4491                                    const struct rte_flow_action *action,
4492                                    struct rte_flow_error *error)
4493 {
4494         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4495                 return rte_flow_error_set(error, EINVAL,
4496                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4497                                           NULL, "action configuration not set");
4498         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4499                 return rte_flow_error_set(error, EINVAL,
4500                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4501                                           "can't have encap action before"
4502                                           " modify action");
4503         return 0;
4504 }
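
/*
 * Ordering note: header rewrites must precede any encap in the action list,
 * since the rewrite could otherwise target the newly added tunnel header
 * rather than the original one. For example, SET_IPV4_SRC followed by
 * VXLAN_ENCAP is accepted while the reverse order is rejected above.
 */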
4505
4506 /**
4507  * Validate the modify-header MAC address actions.
4508  *
4509  * @param[in] action_flags
4510  *   Holds the actions detected until now.
4511  * @param[in] action
4512  *   Pointer to the modify action.
4513  * @param[in] item_flags
4514  *   Holds the items detected.
4515  * @param[out] error
4516  *   Pointer to error structure.
4517  *
4518  * @return
4519  *   0 on success, a negative errno value otherwise and rte_errno is set.
4520  */
4521 static int
4522 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4523                                    const struct rte_flow_action *action,
4524                                    const uint64_t item_flags,
4525                                    struct rte_flow_error *error)
4526 {
4527         int ret = 0;
4528
4529         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4530         if (!ret) {
4531                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4532                         return rte_flow_error_set(error, EINVAL,
4533                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4534                                                   NULL,
4535                                                   "no L2 item in pattern");
4536         }
4537         return ret;
4538 }
4539
4540 /**
4541  * Validate the modify-header IPv4 address actions.
4542  *
4543  * @param[in] action_flags
4544  *   Holds the actions detected until now.
4545  * @param[in] action
4546  *   Pointer to the modify action.
4547  * @param[in] item_flags
4548  *   Holds the items detected.
4549  * @param[out] error
4550  *   Pointer to error structure.
4551  *
4552  * @return
4553  *   0 on success, a negative errno value otherwise and rte_errno is set.
4554  */
4555 static int
4556 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4557                                     const struct rte_flow_action *action,
4558                                     const uint64_t item_flags,
4559                                     struct rte_flow_error *error)
4560 {
4561         int ret = 0;
4562         uint64_t layer;
4563
4564         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4565         if (!ret) {
4566                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4567                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4568                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4569                 if (!(item_flags & layer))
4570                         return rte_flow_error_set(error, EINVAL,
4571                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4572                                                   NULL,
4573                                                   "no ipv4 item in pattern");
4574         }
4575         return ret;
4576 }
4577
4578 /**
4579  * Validate the modify-header IPv6 address actions.
4580  *
4581  * @param[in] action_flags
4582  *   Holds the actions detected until now.
4583  * @param[in] action
4584  *   Pointer to the modify action.
4585  * @param[in] item_flags
4586  *   Holds the items detected.
4587  * @param[out] error
4588  *   Pointer to error structure.
4589  *
4590  * @return
4591  *   0 on success, a negative errno value otherwise and rte_errno is set.
4592  */
4593 static int
4594 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4595                                     const struct rte_flow_action *action,
4596                                     const uint64_t item_flags,
4597                                     struct rte_flow_error *error)
4598 {
4599         int ret = 0;
4600         uint64_t layer;
4601
4602         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4603         if (!ret) {
4604                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4605                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4606                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4607                 if (!(item_flags & layer))
4608                         return rte_flow_error_set(error, EINVAL,
4609                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4610                                                   NULL,
4611                                                   "no ipv6 item in pattern");
4612         }
4613         return ret;
4614 }
4615
4616 /**
4617  * Validate the modify-header TP actions.
4618  *
4619  * @param[in] action_flags
4620  *   Holds the actions detected until now.
4621  * @param[in] action
4622  *   Pointer to the modify action.
4623  * @param[in] item_flags
4624  *   Holds the items detected.
4625  * @param[out] error
4626  *   Pointer to error structure.
4627  *
4628  * @return
4629  *   0 on success, a negative errno value otherwise and rte_errno is set.
4630  */
4631 static int
4632 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4633                                   const struct rte_flow_action *action,
4634                                   const uint64_t item_flags,
4635                                   struct rte_flow_error *error)
4636 {
4637         int ret = 0;
4638         uint64_t layer;
4639
4640         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4641         if (!ret) {
4642                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4643                                  MLX5_FLOW_LAYER_INNER_L4 :
4644                                  MLX5_FLOW_LAYER_OUTER_L4;
4645                 if (!(item_flags & layer))
4646                         return rte_flow_error_set(error, EINVAL,
4647                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4648                                                   NULL, "no transport layer "
4649                                                   "in pattern");
4650         }
4651         return ret;
4652 }
4653
4654 /**
4655  * Validate the modify-header actions of increment/decrement
4656  * TCP Sequence-number.
4657  *
4658  * @param[in] action_flags
4659  *   Holds the actions detected until now.
4660  * @param[in] action
4661  *   Pointer to the modify action.
4662  * @param[in] item_flags
4663  *   Holds the items detected.
4664  * @param[out] error
4665  *   Pointer to error structure.
4666  *
4667  * @return
4668  *   0 on success, a negative errno value otherwise and rte_errno is set.
4669  */
4670 static int
4671 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4672                                        const struct rte_flow_action *action,
4673                                        const uint64_t item_flags,
4674                                        struct rte_flow_error *error)
4675 {
4676         int ret = 0;
4677         uint64_t layer;
4678
4679         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4680         if (!ret) {
4681                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4682                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4683                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4684                 if (!(item_flags & layer))
4685                         return rte_flow_error_set(error, EINVAL,
4686                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4687                                                   NULL, "no TCP item in"
4688                                                   " pattern");
4689                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4690                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4691                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4692                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4693                         return rte_flow_error_set(error, EINVAL,
4694                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4695                                                   NULL,
4696                                                   "cannot decrease and increase"
4697                                                   " TCP sequence number"
4698                                                   " at the same time");
4699         }
4700         return ret;
4701 }
4702
4703 /**
4704  * Validate the modify-header actions of increment/decrement
4705  * TCP Acknowledgment number.
4706  *
4707  * @param[in] action_flags
4708  *   Holds the actions detected until now.
4709  * @param[in] action
4710  *   Pointer to the modify action.
4711  * @param[in] item_flags
4712  *   Holds the items detected.
4713  * @param[out] error
4714  *   Pointer to error structure.
4715  *
4716  * @return
4717  *   0 on success, a negative errno value otherwise and rte_errno is set.
4718  */
4719 static int
4720 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4721                                        const struct rte_flow_action *action,
4722                                        const uint64_t item_flags,
4723                                        struct rte_flow_error *error)
4724 {
4725         int ret = 0;
4726         uint64_t layer;
4727
4728         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4729         if (!ret) {
4730                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4731                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4732                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4733                 if (!(item_flags & layer))
4734                         return rte_flow_error_set(error, EINVAL,
4735                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4736                                                   NULL, "no TCP item in"
4737                                                   " pattern");
4738                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4739                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4740                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4741                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4742                         return rte_flow_error_set(error, EINVAL,
4743                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4744                                                   NULL,
4745                                                   "cannot decrease and increase"
4746                                                   " TCP acknowledgment number"
4747                                                   " at the same time");
4748         }
4749         return ret;
4750 }
4751
4752 /**
4753  * Validate the modify-header TTL actions.
4754  *
4755  * @param[in] action_flags
4756  *   Holds the actions detected until now.
4757  * @param[in] action
4758  *   Pointer to the modify action.
4759  * @param[in] item_flags
4760  *   Holds the items detected.
4761  * @param[out] error
4762  *   Pointer to error structure.
4763  *
4764  * @return
4765  *   0 on success, a negative errno value otherwise and rte_errno is set.
4766  */
4767 static int
4768 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4769                                    const struct rte_flow_action *action,
4770                                    const uint64_t item_flags,
4771                                    struct rte_flow_error *error)
4772 {
4773         int ret = 0;
4774         uint64_t layer;
4775
4776         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4777         if (!ret) {
4778                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4779                                  MLX5_FLOW_LAYER_INNER_L3 :
4780                                  MLX5_FLOW_LAYER_OUTER_L3;
4781                 if (!(item_flags & layer))
4782                         return rte_flow_error_set(error, EINVAL,
4783                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4784                                                   NULL,
4785                                                   "no IP protocol in pattern");
4786         }
4787         return ret;
4788 }
4789
4790 /**
4791  * Validate the generic modify field actions.
4792  * @param[in] dev
4793  *   Pointer to the rte_eth_dev structure.
4794  * @param[in] action_flags
4795  *   Holds the actions detected until now.
4796  * @param[in] action
4797  *   Pointer to the modify action.
4798  * @param[in] attr
4799  *   Pointer to the flow attributes.
4800  * @param[out] error
4801  *   Pointer to error structure.
4802  *
4803  * @return
4804  *   Number of header fields to modify (0 or more) on success,
4805  *   a negative errno value otherwise and rte_errno is set.
4806  */
4807 static int
4808 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4809                                    const uint64_t action_flags,
4810                                    const struct rte_flow_action *action,
4811                                    const struct rte_flow_attr *attr,
4812                                    struct rte_flow_error *error)
4813 {
4814         int ret = 0;
4815         struct mlx5_priv *priv = dev->data->dev_private;
4816         struct mlx5_dev_config *config = &priv->config;
4817         const struct rte_flow_action_modify_field *action_modify_field =
4818                 action->conf;
4819         uint32_t dst_width = mlx5_flow_item_field_width(dev,
4820                                 action_modify_field->dst.field,
4821                                 -1, attr, error);
4822         uint32_t src_width = mlx5_flow_item_field_width(dev,
4823                                 action_modify_field->src.field,
4824                                 dst_width, attr, error);
4825
4826         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4827         if (ret)
4828                 return ret;
4829
4830         if (action_modify_field->width == 0)
4831                 return rte_flow_error_set(error, EINVAL,
4832                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4833                                 "no bits are requested to be modified");
4834         else if (action_modify_field->width > dst_width ||
4835                  action_modify_field->width > src_width)
4836                 return rte_flow_error_set(error, EINVAL,
4837                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4838                                 "cannot modify more bits than"
4839                                 " the width of a field");
4840         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4841             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4842                 if ((action_modify_field->dst.offset +
4843                      action_modify_field->width > dst_width) ||
4844                     (action_modify_field->dst.offset % 32))
4845                         return rte_flow_error_set(error, EINVAL,
4846                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4847                                         "destination offset is too big"
4848                                         " or not aligned to 4 bytes");
4849                 if (action_modify_field->dst.level &&
4850                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4851                         return rte_flow_error_set(error, ENOTSUP,
4852                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4853                                         "inner header fields modification"
4854                                         " is not supported");
4855         }
4856         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4857             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4858                 if (!attr->transfer && !attr->group)
4859                         return rte_flow_error_set(error, ENOTSUP,
4860                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4861                                         "modify field action is not"
4862                                         " supported for group 0");
4863                 if ((action_modify_field->src.offset +
4864                      action_modify_field->width > src_width) ||
4865                     (action_modify_field->src.offset % 32))
4866                         return rte_flow_error_set(error, EINVAL,
4867                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4868                                         "source offset is too big"
4869                                         " or not aligned to 4 bytes");
4870                 if (action_modify_field->src.level &&
4871                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4872                         return rte_flow_error_set(error, ENOTSUP,
4873                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4874                                         "inner header fields modification"
4875                                         " is not supported");
4876         }
4877         if ((action_modify_field->dst.field ==
4878              action_modify_field->src.field) &&
4879             (action_modify_field->dst.level ==
4880              action_modify_field->src.level))
4881                 return rte_flow_error_set(error, EINVAL,
4882                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4883                                 "source and destination fields"
4884                                 " cannot be the same");
4885         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4886             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER ||
4887             action_modify_field->dst.field == RTE_FLOW_FIELD_MARK)
4888                 return rte_flow_error_set(error, EINVAL,
4889                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4890                                 "mark, immediate value or a pointer to it"
4891                                 " cannot be used as a destination");
4892         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4893             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4894                 return rte_flow_error_set(error, ENOTSUP,
4895                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4896                                 "modification of an arbitrary"
4897                                 " place in a packet is not supported");
4898         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4899             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4900                 return rte_flow_error_set(error, ENOTSUP,
4901                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4902                                 "modification of the 802.1Q Tag"
4903                                 " Identifier is not supported");
4904         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4905             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4906                 return rte_flow_error_set(error, ENOTSUP,
4907                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4908                                 "modification of the VXLAN Network"
4909                                 " Identifier is not supported");
4910         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4911             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4912                 return rte_flow_error_set(error, ENOTSUP,
4913                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4914                                 "modification of the GENEVE Network"
4915                                 " Identifier is not supported");
4916         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4917             action_modify_field->src.field == RTE_FLOW_FIELD_MARK)
4918                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4919                     !mlx5_flow_ext_mreg_supported(dev))
4920                         return rte_flow_error_set(error, ENOTSUP,
4921                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4922                                         "cannot modify mark in legacy mode"
4923                                         " or without extensive registers");
4924         if (action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4925             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4926                 if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
4927                     !mlx5_flow_ext_mreg_supported(dev))
4928                         return rte_flow_error_set(error, ENOTSUP,
4929                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4930                                         "cannot modify meta without"
4931                                         " extensive registers support");
4932                 ret = flow_dv_get_metadata_reg(dev, attr, error);
4933                 if (ret < 0 || ret == REG_NON)
4934                         return rte_flow_error_set(error, ENOTSUP,
4935                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4936                                         "cannot modify meta without"
4937                                         " extensive registers available");
4938         }
4939         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4940                 return rte_flow_error_set(error, ENOTSUP,
4941                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4942                                 "add and sub operations"
4943                                 " are not supported");
4944         return (action_modify_field->width / 32) +
4945                !!(action_modify_field->width % 32);
4946 }
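
/*
 * Editor's sketch (not part of the driver): a MODIFY_FIELD action shaped the
 * way the validator above expects it -- SET operation only, offsets aligned
 * to 32 bits, and a non-zero width that fits both fields. The tag index and
 * the 32-bit width are arbitrary illustrative values.
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_action_modify_field example_modify_field = {
	.operation = RTE_FLOW_MODIFY_SET,	/* ADD/SUB are rejected above */
	.dst = {
		.field = RTE_FLOW_FIELD_TAG,	/* level selects the TAG index */
		.level = 1,
		.offset = 0,			/* must be a multiple of 32 */
	},
	.src = {
		.field = RTE_FLOW_FIELD_VALUE,	/* immediate value as source */
		.value = { 0xde, 0xad, 0xbe, 0xef },
	},
	.width = 32,	/* 0 bits or more than the field width fails */
};
#endif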
4947
4948 /**
4949  * Validate jump action.
4950  *
4951  * @param[in] action
4952  *   Pointer to the jump action.
4953  * @param[in] action_flags
4954  *   Holds the actions detected until now.
4955  * @param[in] attributes
4956  *   Pointer to flow attributes
4957  * @param[in] external
4958  *   Action belongs to a flow rule created by a request external to the PMD.
4959  * @param[out] error
4960  *   Pointer to error structure.
4961  *
4962  * @return
4963  *   0 on success, a negative errno value otherwise and rte_errno is set.
4964  */
4965 static int
4966 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4967                              const struct mlx5_flow_tunnel *tunnel,
4968                              const struct rte_flow_action *action,
4969                              uint64_t action_flags,
4970                              const struct rte_flow_attr *attributes,
4971                              bool external, struct rte_flow_error *error)
4972 {
4973         uint32_t target_group, table;
4974         int ret = 0;
4975         struct flow_grp_info grp_info = {
4976                 .external = !!external,
4977                 .transfer = !!attributes->transfer,
4978                 .fdb_def_rule = 1,
4979                 .std_tbl_fix = 0
4980         };
4981         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4982                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4983                 return rte_flow_error_set(error, EINVAL,
4984                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4985                                           "can't have 2 fate actions in"
4986                                           " same flow");
4987         if (!action->conf)
4988                 return rte_flow_error_set(error, EINVAL,
4989                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4990                                           NULL, "action configuration not set");
4991         target_group =
4992                 ((const struct rte_flow_action_jump *)action->conf)->group;
4993         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4994                                        &grp_info, error);
4995         if (ret)
4996                 return ret;
4997         if (attributes->group == target_group &&
4998             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4999                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
5000                 return rte_flow_error_set(error, EINVAL,
5001                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5002                                           "target group must be other than"
5003                                           " the current flow group");
5004         return 0;
5005 }
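
/*
 * Editor's sketch (not part of the driver): the JUMP action checked by the
 * validator above. The visible requirements are a non-NULL conf, no other
 * fate action in the same flow, and a target group different from the
 * flow's own group (unless tunnel offload remaps it). Group 3 is an
 * arbitrary example value.
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_action_jump example_jump = { .group = 3 };
static const struct rte_flow_action example_jump_action = {
	.type = RTE_FLOW_ACTION_TYPE_JUMP,
	.conf = &example_jump,
};
#endif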
5006
5007 /**
5008  * Validate action PORT_ID / REPRESENTED_PORT.
5009  *
5010  * @param[in] dev
5011  *   Pointer to rte_eth_dev structure.
5012  * @param[in] action_flags
5013  *   Bit-fields that hold the actions detected until now.
5014  * @param[in] action
5015  *   PORT_ID / REPRESENTED_PORT action structure.
5016  * @param[in] attr
5017  *   Attributes of flow that includes this action.
5018  * @param[out] error
5019  *   Pointer to error structure.
5020  *
5021  * @return
5022  *   0 on success, a negative errno value otherwise and rte_errno is set.
5023  */
5024 static int
5025 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5026                                 uint64_t action_flags,
5027                                 const struct rte_flow_action *action,
5028                                 const struct rte_flow_attr *attr,
5029                                 struct rte_flow_error *error)
5030 {
5031         const struct rte_flow_action_port_id *port_id;
5032         const struct rte_flow_action_ethdev *ethdev;
5033         struct mlx5_priv *act_priv;
5034         struct mlx5_priv *dev_priv;
5035         uint16_t port;
5036
5037         if (!attr->transfer)
5038                 return rte_flow_error_set(error, ENOTSUP,
5039                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5040                                           NULL,
5041                                           "port action is valid in transfer"
5042                                           " mode only");
5043         if (!action || !action->conf)
5044                 return rte_flow_error_set(error, ENOTSUP,
5045                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5046                                           NULL,
5047                                           "port action parameters must be"
5048                                           " specified");
5049         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5050                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5051                 return rte_flow_error_set(error, EINVAL,
5052                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5053                                           "can have only one fate action in"
5054                                           " a flow");
5055         dev_priv = mlx5_dev_to_eswitch_info(dev);
5056         if (!dev_priv)
5057                 return rte_flow_error_set(error, rte_errno,
5058                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5059                                           NULL,
5060                                           "failed to obtain E-Switch info");
5061         switch (action->type) {
5062         case RTE_FLOW_ACTION_TYPE_PORT_ID:
5063                 port_id = action->conf;
5064                 port = port_id->original ? dev->data->port_id : port_id->id;
5065                 break;
5066         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5067                 ethdev = action->conf;
5068                 port = ethdev->port_id;
5069                 break;
5070         default:
5071                 MLX5_ASSERT(false);
5072                 return rte_flow_error_set
5073                                 (error, EINVAL,
5074                                  RTE_FLOW_ERROR_TYPE_ACTION, action,
5075                                  "unknown E-Switch action");
5076         }
5077         act_priv = mlx5_port_to_eswitch_info(port, false);
5078         if (!act_priv)
5079                 return rte_flow_error_set
5080                                 (error, rte_errno,
5081                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, action->conf,
5082                                  "failed to obtain E-Switch port id for port");
5083         if (act_priv->domain_id != dev_priv->domain_id)
5084                 return rte_flow_error_set
5085                                 (error, EINVAL,
5086                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5087                                  "port does not belong to"
5088                                  " E-Switch being configured");
5089         return 0;
5090 }
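
/*
 * Editor's sketch (not part of the driver): the two conf layouts accepted by
 * the validator above. PORT_ID carries a DPDK port id (or redirects back to
 * the original port), while REPRESENTED_PORT names the ethdev whose
 * represented E-Switch port is the destination. Port 1 is an arbitrary
 * example; the target must share the E-Switch domain of the device.
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_action_port_id example_port_id = {
	.original = 0,	/* non-zero means ignore .id and use the own port */
	.id = 1,
};
static const struct rte_flow_action_ethdev example_represented_port = {
	.port_id = 1,
};
#endif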
5091
5092 /**
5093  * Get the maximum number of modify header actions.
5094  *
5095  * @param dev
5096  *   Pointer to rte_eth_dev structure.
5097  * @param root
5098  *   Whether action is on root table.
5099  *
5100  * @return
5101  *   Max number of modify header actions device can support.
5102  */
5103 static inline unsigned int
5104 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5105                               bool root)
5106 {
5107         /*
5108          * There's no way to directly query the max capacity from FW.
5109          * The maximal value on root table should be assumed to be supported.
5110          */
5111         if (!root)
5112                 return MLX5_MAX_MODIFY_NUM;
5113         else
5114                 return MLX5_ROOT_TBL_MODIFY_NUM;
5115 }
5116
5117 /**
5118  * Validate the meter action.
5119  *
5120  * @param[in] dev
5121  *   Pointer to rte_eth_dev structure.
5122  * @param[in] action_flags
5123  *   Bit-fields that hold the actions detected until now.
5124  * @param[in] action
5125  *   Pointer to the meter action.
5126  * @param[in] attr
5127  *   Attributes of flow that includes this action.
5128  * @param[in] port_id_item
5129  *   Pointer to item indicating port id.
5130  * @param[out] error
5131  *   Pointer to error structure.
5132  *
5133  * @return
5134  *   0 on success, a negative errno value otherwise and rte_errno is set.
5135  */
5136 static int
5137 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5138                                 uint64_t action_flags,
5139                                 const struct rte_flow_action *action,
5140                                 const struct rte_flow_attr *attr,
5141                                 const struct rte_flow_item *port_id_item,
5142                                 bool *def_policy,
5143                                 struct rte_flow_error *error)
5144 {
5145         struct mlx5_priv *priv = dev->data->dev_private;
5146         const struct rte_flow_action_meter *am = action->conf;
5147         struct mlx5_flow_meter_info *fm;
5148         struct mlx5_flow_meter_policy *mtr_policy;
5149         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5150
5151         if (!am)
5152                 return rte_flow_error_set(error, EINVAL,
5153                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5154                                           "meter action conf is NULL");
5155
5156         if (action_flags & MLX5_FLOW_ACTION_METER)
5157                 return rte_flow_error_set(error, ENOTSUP,
5158                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5159                                           "meter chaining not supported");
5160         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5161                 return rte_flow_error_set(error, ENOTSUP,
5162                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5163                                           "meter with jump not supported");
5164         if (!priv->mtr_en)
5165                 return rte_flow_error_set(error, ENOTSUP,
5166                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5167                                           NULL,
5168                                           "meter action not supported");
5169         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5170         if (!fm)
5171                 return rte_flow_error_set(error, EINVAL,
5172                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5173                                           "Meter not found");
5174         /* ASO meter can always be shared by different domains. */
5175         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5176             !(fm->transfer == attr->transfer ||
5177               (!fm->ingress && !attr->ingress && attr->egress) ||
5178               (!fm->egress && !attr->egress && attr->ingress)))
5179                 return rte_flow_error_set(error, EINVAL,
5180                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5181                         "Flow attributes domain is either invalid "
5182                         "or has a domain conflict with current "
5183                         "meter attributes");
5184         if (fm->def_policy) {
5185                 if (!((attr->transfer &&
5186                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5187                         (attr->egress &&
5188                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5189                         (attr->ingress &&
5190                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5191                         return rte_flow_error_set(error, EINVAL,
5192                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5193                                           "Flow attributes domain "
5194                                           "has a conflict with current "
5195                                           "meter domain attributes");
5196                 *def_policy = true;
5197         } else {
5198                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5199                                                 fm->policy_id, NULL);
5200                 if (!mtr_policy)
5201                         return rte_flow_error_set(error, EINVAL,
5202                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5203                                           "Invalid policy id for meter");
5204                 if (!((attr->transfer && mtr_policy->transfer) ||
5205                         (attr->egress && mtr_policy->egress) ||
5206                         (attr->ingress && mtr_policy->ingress)))
5207                         return rte_flow_error_set(error, EINVAL,
5208                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5209                                           "Flow attributes domain "
5210                                           "has a conflict with current "
5211                                           "meter domain attributes");
5212                 if (attr->transfer && mtr_policy->dev) {
5213                         /**
5214                          * When policy has fate action of port_id,
5215                          * the flow should have the same src port as policy.
5216                          */
5217                         struct mlx5_priv *policy_port_priv =
5218                                         mtr_policy->dev->data->dev_private;
5219                         int32_t flow_src_port = priv->representor_id;
5220
5221                         if (port_id_item) {
5222                                 const struct rte_flow_item_port_id *spec =
5223                                                         port_id_item->spec;
5224                                 struct mlx5_priv *port_priv =
5225                                         mlx5_port_to_eswitch_info(spec->id,
5226                                                                   false);
5227                                 if (!port_priv)
5228                                         return rte_flow_error_set(error,
5229                                                 rte_errno,
5230                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5231                                                 spec,
5232                                                 "Failed to get port info.");
5233                                 flow_src_port = port_priv->representor_id;
5234                         }
5235                         if (flow_src_port != policy_port_priv->representor_id)
5236                                 return rte_flow_error_set(error,
5237                                                 rte_errno,
5238                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5239                                                 NULL,
5240                                                 "Flow and meter policy "
5241                                                 "have different src port.");
5242                 }
5243                 *def_policy = false;
5244         }
5245         return 0;
5246 }
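
/*
 * Editor's sketch (not part of the driver): a METER action referencing a
 * meter created beforehand through the rte_mtr API. The validator above
 * only resolves mtr_id and checks the domain/policy consistency; 7 is an
 * arbitrary example id.
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_action_meter example_meter = { .mtr_id = 7 };
static const struct rte_flow_action example_meter_action = {
	.type = RTE_FLOW_ACTION_TYPE_METER,
	.conf = &example_meter,
};
#endif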
5247
5248 /**
5249  * Validate the age action.
5250  *
5251  * @param[in] action_flags
5252  *   Holds the actions detected until now.
5253  * @param[in] action
5254  *   Pointer to the age action.
5255  * @param[in] dev
5256  *   Pointer to the Ethernet device structure.
5257  * @param[out] error
5258  *   Pointer to error structure.
5259  *
5260  * @return
5261  *   0 on success, a negative errno value otherwise and rte_errno is set.
5262  */
5263 static int
5264 flow_dv_validate_action_age(uint64_t action_flags,
5265                             const struct rte_flow_action *action,
5266                             struct rte_eth_dev *dev,
5267                             struct rte_flow_error *error)
5268 {
5269         struct mlx5_priv *priv = dev->data->dev_private;
5270         const struct rte_flow_action_age *age = action->conf;
5271
5272         if (!priv->sh->devx || (priv->sh->cmng.counter_fallback &&
5273             !priv->sh->aso_age_mng))
5274                 return rte_flow_error_set(error, ENOTSUP,
5275                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5276                                           NULL,
5277                                           "age action not supported");
5278         if (!(action->conf))
5279                 return rte_flow_error_set(error, EINVAL,
5280                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5281                                           "configuration cannot be null");
5282         if (!(age->timeout))
5283                 return rte_flow_error_set(error, EINVAL,
5284                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5285                                           "invalid timeout value 0");
5286         if (action_flags & MLX5_FLOW_ACTION_AGE)
5287                 return rte_flow_error_set(error, EINVAL,
5288                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5289                                           "duplicate age actions set");
5290         return 0;
5291 }
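
/*
 * Editor's sketch (not part of the driver): an AGE action as validated
 * above -- conf must be non-NULL, the timeout non-zero, and at most one AGE
 * action may appear per flow. The 10-second timeout is an arbitrary
 * example value.
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_action_age example_age = {
	.timeout = 10,		/* seconds, 24-bit field, must not be 0 */
	.context = NULL,	/* returned by aged-flow queries if set */
};
#endif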
5292
5293 /**
5294  * Validate the modify-header IPv4 DSCP actions.
5295  *
5296  * @param[in] action_flags
5297  *   Holds the actions detected until now.
5298  * @param[in] action
5299  *   Pointer to the modify action.
5300  * @param[in] item_flags
5301  *   Holds the items detected.
5302  * @param[out] error
5303  *   Pointer to error structure.
5304  *
5305  * @return
5306  *   0 on success, a negative errno value otherwise and rte_errno is set.
5307  */
5308 static int
5309 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5310                                          const struct rte_flow_action *action,
5311                                          const uint64_t item_flags,
5312                                          struct rte_flow_error *error)
5313 {
5314         int ret = 0;
5315
5316         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5317         if (!ret) {
5318                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5319                         return rte_flow_error_set(error, EINVAL,
5320                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5321                                                   NULL,
5322                                                   "no ipv4 item in pattern");
5323         }
5324         return ret;
5325 }
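
/*
 * Editor's sketch (not part of the driver): the action matching the
 * validator above. It only passes when the pattern contains an IPv4 item;
 * the IPv6 variant below mirrors it. DSCP 46 (EF) is an arbitrary example
 * value.
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_action_set_dscp example_dscp = { .dscp = 46 };
static const struct rte_flow_action example_dscp_action = {
	.type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP,
	.conf = &example_dscp,
};
#endif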
5326
5327 /**
5328  * Validate the modify-header IPv6 DSCP actions.
5329  *
5330  * @param[in] action_flags
5331  *   Holds the actions detected until now.
5332  * @param[in] action
5333  *   Pointer to the modify action.
5334  * @param[in] item_flags
5335  *   Holds the items detected.
5336  * @param[out] error
5337  *   Pointer to error structure.
5338  *
5339  * @return
5340  *   0 on success, a negative errno value otherwise and rte_errno is set.
5341  */
5342 static int
5343 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5344                                          const struct rte_flow_action *action,
5345                                          const uint64_t item_flags,
5346                                          struct rte_flow_error *error)
5347 {
5348         int ret = 0;
5349
5350         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5351         if (!ret) {
5352                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5353                         return rte_flow_error_set(error, EINVAL,
5354                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5355                                                   NULL,
5356                                                   "no ipv6 item in pattern");
5357         }
5358         return ret;
5359 }
5360
5361 int
5362 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5363                         struct mlx5_list_entry *entry, void *cb_ctx)
5364 {
5365         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5366         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5367         struct mlx5_flow_dv_modify_hdr_resource *resource =
5368                                   container_of(entry, typeof(*resource), entry);
5369         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5370
5371         key_len += ref->actions_num * sizeof(ref->actions[0]);
5372         return ref->actions_num != resource->actions_num ||
5373                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5374 }
5375
5376 static struct mlx5_indexed_pool *
5377 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
5378 {
5379         struct mlx5_indexed_pool *ipool = __atomic_load_n
5380                                      (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
5381
5382         if (!ipool) {
5383                 struct mlx5_indexed_pool *expected = NULL;
5384                 struct mlx5_indexed_pool_config cfg =
5385                     (struct mlx5_indexed_pool_config) {
5386                        .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
5387                                                                    (index + 1) *
5388                                            sizeof(struct mlx5_modification_cmd),
5389                        .trunk_size = 64,
5390                        .grow_trunk = 3,
5391                        .grow_shift = 2,
5392                        .need_lock = 1,
5393                        .release_mem_en = !!sh->reclaim_mode,
5394                        .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
5395                        .malloc = mlx5_malloc,
5396                        .free = mlx5_free,
5397                        .type = "mlx5_modify_action_resource",
5398                 };
5399
5400                 cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
5401                 ipool = mlx5_ipool_create(&cfg);
5402                 if (!ipool)
5403                         return NULL;
5404                 if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
5405                                                  &expected, ipool, false,
5406                                                  __ATOMIC_SEQ_CST,
5407                                                  __ATOMIC_SEQ_CST)) {
5408                         mlx5_ipool_destroy(ipool);
5409                         ipool = __atomic_load_n(&sh->mdh_ipools[index],
5410                                                 __ATOMIC_SEQ_CST);
5411                 }
5412         }
5413         return ipool;
5414 }
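
/*
 * Editor's note: the function above publishes a lazily created ipool with a
 * compare-and-swap so concurrent callers never leak more than one pool --
 * the loser destroys its copy and reloads the winner's pointer. A minimal
 * sketch of the same idiom with hypothetical obj_create()/obj_destroy():
 */
#if 0 /* illustrative example, not compiled */
static struct obj *
obj_get_lazy(struct obj **slot)
{
	struct obj *o = __atomic_load_n(slot, __ATOMIC_SEQ_CST);

	if (!o) {
		struct obj *expected = NULL;

		o = obj_create();
		if (o && !__atomic_compare_exchange_n(slot, &expected, o,
						      false, __ATOMIC_SEQ_CST,
						      __ATOMIC_SEQ_CST)) {
			/* Lost the race: drop ours, take the winner's. */
			obj_destroy(o);
			o = __atomic_load_n(slot, __ATOMIC_SEQ_CST);
		}
	}
	return o;
}
#endif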
5415
5416 struct mlx5_list_entry *
5417 flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
5418 {
5419         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5420         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5421         struct mlx5dv_dr_domain *ns;
5422         struct mlx5_flow_dv_modify_hdr_resource *entry;
5423         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5424         struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
5425                                                           ref->actions_num - 1);
5426         int ret;
5427         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5428         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5429         uint32_t idx;
5430
5431         if (unlikely(!ipool)) {
5432                 rte_flow_error_set(ctx->error, ENOMEM,
5433                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5434                                    NULL, "cannot allocate modify ipool");
5435                 return NULL;
5436         }
5437         entry = mlx5_ipool_zmalloc(ipool, &idx);
5438         if (!entry) {
5439                 rte_flow_error_set(ctx->error, ENOMEM,
5440                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5441                                    "cannot allocate resource memory");
5442                 return NULL;
5443         }
5444         rte_memcpy(&entry->ft_type,
5445                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5446                    key_len + data_len);
5447         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5448                 ns = sh->fdb_domain;
5449         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5450                 ns = sh->tx_domain;
5451         else
5452                 ns = sh->rx_domain;
5453         ret = mlx5_flow_os_create_flow_action_modify_header
5454                                         (sh->cdev->ctx, ns, entry,
5455                                          data_len, &entry->action);
5456         if (ret) {
5457                 mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
5458                 rte_flow_error_set(ctx->error, ENOMEM,
5459                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5460                                    NULL, "cannot create modification action");
5461                 return NULL;
5462         }
5463         entry->idx = idx;
5464         return &entry->entry;
5465 }
5466
5467 struct mlx5_list_entry *
5468 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5469                         void *cb_ctx)
5470 {
5471         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5472         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5473         struct mlx5_flow_dv_modify_hdr_resource *entry;
5474         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5475         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5476         uint32_t idx;
5477
5478         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5479                                   &idx);
5480         if (!entry) {
5481                 rte_flow_error_set(ctx->error, ENOMEM,
5482                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5483                                    "cannot allocate resource memory");
5484                 return NULL;
5485         }
5486         memcpy(entry, oentry, sizeof(*entry) + data_len);
5487         entry->idx = idx;
5488         return &entry->entry;
5489 }
5490
5491 void
5492 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5493 {
5494         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5495         struct mlx5_flow_dv_modify_hdr_resource *res =
5496                 container_of(entry, typeof(*res), entry);
5497
5498         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5499 }
5500
5501 /**
5502  * Validate the sample action.
5503  *
5504  * @param[in, out] action_flags
5505  *   Holds the actions detected until now.
5506  * @param[in] action
5507  *   Pointer to the sample action.
5508  * @param[in] dev
5509  *   Pointer to the Ethernet device structure.
5510  * @param[in] attr
5511  *   Attributes of flow that includes this action.
5512  * @param[in] item_flags
5513  *   Holds the items detected.
5514  * @param[in] rss
5515  *   Pointer to the RSS action.
5516  * @param[out] sample_rss
5517  *   Pointer to the RSS action in sample action list.
5518  * @param[out] count
5519  *   Pointer to the COUNT action in sample action list.
5520  * @param[out] fdb_mirror_limit
5521  *   Pointer to the FDB mirror limitation flag.
5522  * @param[out] error
5523  *   Pointer to error structure.
5524  *
5525  * @return
5526  *   0 on success, a negative errno value otherwise and rte_errno is set.
5527  */
5528 static int
5529 flow_dv_validate_action_sample(uint64_t *action_flags,
5530                                const struct rte_flow_action *action,
5531                                struct rte_eth_dev *dev,
5532                                const struct rte_flow_attr *attr,
5533                                uint64_t item_flags,
5534                                const struct rte_flow_action_rss *rss,
5535                                const struct rte_flow_action_rss **sample_rss,
5536                                const struct rte_flow_action_count **count,
5537                                int *fdb_mirror_limit,
5538                                struct rte_flow_error *error)
5539 {
5540         struct mlx5_priv *priv = dev->data->dev_private;
5541         struct mlx5_dev_config *dev_conf = &priv->config;
5542         const struct rte_flow_action_sample *sample = action->conf;
5543         const struct rte_flow_action *act;
5544         uint64_t sub_action_flags = 0;
5545         uint16_t queue_index = 0xFFFF;
5546         int actions_n = 0;
5547         int ret;
5548
5549         if (!sample)
5550                 return rte_flow_error_set(error, EINVAL,
5551                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5552                                           "configuration cannot be NULL");
5553         if (sample->ratio == 0)
5554                 return rte_flow_error_set(error, EINVAL,
5555                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5556                                           "ratio value starts from 1");
5557         if (!priv->sh->devx || (sample->ratio > 0 && !priv->sampler_en))
5558                 return rte_flow_error_set(error, ENOTSUP,
5559                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5560                                           NULL,
5561                                           "sample action not supported");
5562         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5563                 return rte_flow_error_set(error, EINVAL,
5564                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5565                                           "Multiple sample actions not "
5566                                           "supported");
5567         if (*action_flags & MLX5_FLOW_ACTION_METER)
5568                 return rte_flow_error_set(error, EINVAL,
5569                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5570                                           "wrong action order, meter should "
5571                                           "be after sample action");
5572         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5573                 return rte_flow_error_set(error, EINVAL,
5574                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5575                                           "wrong action order, jump should "
5576                                           "be after sample action");
5577         if (*action_flags & MLX5_FLOW_ACTION_CT)
5578                 return rte_flow_error_set(error, EINVAL,
5579                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5580                                           "Sample after CT not supported");
5581         act = sample->actions;
5582         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5583                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5584                         return rte_flow_error_set(error, ENOTSUP,
5585                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5586                                                   act, "too many actions");
5587                 switch (act->type) {
5588                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5589                         ret = mlx5_flow_validate_action_queue(act,
5590                                                               sub_action_flags,
5591                                                               dev,
5592                                                               attr, error);
5593                         if (ret < 0)
5594                                 return ret;
5595                         queue_index = ((const struct rte_flow_action_queue *)
5596                                                         (act->conf))->index;
5597                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5598                         ++actions_n;
5599                         break;
5600                 case RTE_FLOW_ACTION_TYPE_RSS:
5601                         *sample_rss = act->conf;
5602                         ret = mlx5_flow_validate_action_rss(act,
5603                                                             sub_action_flags,
5604                                                             dev, attr,
5605                                                             item_flags,
5606                                                             error);
5607                         if (ret < 0)
5608                                 return ret;
5609                         if (rss && *sample_rss &&
5610                             ((*sample_rss)->level != rss->level ||
5611                             (*sample_rss)->types != rss->types))
5612                                 return rte_flow_error_set(error, ENOTSUP,
5613                                         RTE_FLOW_ERROR_TYPE_ACTION,
5614                                         NULL,
5615                                         "Can't use different RSS types "
5616                                         "or levels in the same flow");
5617                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5618                                 queue_index = (*sample_rss)->queue[0];
5619                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5620                         ++actions_n;
5621                         break;
5622                 case RTE_FLOW_ACTION_TYPE_MARK:
5623                         ret = flow_dv_validate_action_mark(dev, act,
5624                                                            sub_action_flags,
5625                                                            attr, error);
5626                         if (ret < 0)
5627                                 return ret;
5628                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5629                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5630                                                 MLX5_FLOW_ACTION_MARK_EXT;
5631                         else
5632                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5633                         ++actions_n;
5634                         break;
5635                 case RTE_FLOW_ACTION_TYPE_COUNT:
5636                         ret = flow_dv_validate_action_count
5637                                 (dev, false, *action_flags | sub_action_flags,
5638                                  error);
5639                         if (ret < 0)
5640                                 return ret;
5641                         *count = act->conf;
5642                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5643                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5644                         ++actions_n;
5645                         break;
5646                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5647                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5648                         ret = flow_dv_validate_action_port_id(dev,
5649                                                               sub_action_flags,
5650                                                               act,
5651                                                               attr,
5652                                                               error);
5653                         if (ret)
5654                                 return ret;
5655                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5656                         ++actions_n;
5657                         break;
5658                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5659                         ret = flow_dv_validate_action_raw_encap_decap
5660                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5661                                  &actions_n, action, item_flags, error);
5662                         if (ret < 0)
5663                                 return ret;
5664                         ++actions_n;
5665                         break;
5666                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5667                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5668                         ret = flow_dv_validate_action_l2_encap(dev,
5669                                                                sub_action_flags,
5670                                                                act, attr,
5671                                                                error);
5672                         if (ret < 0)
5673                                 return ret;
5674                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5675                         ++actions_n;
5676                         break;
5677                 default:
5678                         return rte_flow_error_set(error, ENOTSUP,
5679                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5680                                                   NULL,
5681                                                   "Unsupported optional "
5682                                                   "action");
5683                 }
5684         }
5685         if (attr->ingress && !attr->transfer) {
5686                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5687                                           MLX5_FLOW_ACTION_RSS)))
5688                         return rte_flow_error_set(error, EINVAL,
5689                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5690                                                   NULL,
5691                                                   "Ingress must have a dest "
5692                                                   "QUEUE for Sample");
5693         } else if (attr->egress && !attr->transfer) {
5694                 return rte_flow_error_set(error, ENOTSUP,
5695                                           RTE_FLOW_ERROR_TYPE_ACTION,
5696                                           NULL,
5697                                           "Sample only supports Ingress "
5698                                           "or E-Switch");
5699         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5700                 MLX5_ASSERT(attr->transfer);
5701                 if (sample->ratio > 1)
5702                         return rte_flow_error_set(error, ENOTSUP,
5703                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5704                                                   NULL,
5705                                                   "E-Switch doesn't support "
5706                                                   "any optional action "
5707                                                   "for sampling");
5708                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5709                         return rte_flow_error_set(error, ENOTSUP,
5710                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5711                                                   NULL,
5712                                                   "unsupported action QUEUE");
5713                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5714                         return rte_flow_error_set(error, ENOTSUP,
5715                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5716                                                   NULL,
5717                                                   "unsupported action RSS");
5718                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5719                         return rte_flow_error_set(error, EINVAL,
5720                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5721                                                   NULL,
5722                                                   "E-Switch must have a dest "
5723                                                   "port for mirroring");
5724                 if (!priv->config.hca_attr.reg_c_preserve &&
5725                      priv->representor_id != UINT16_MAX)
5726                         *fdb_mirror_limit = 1;
5727         }
5728         /* Continue validation for Xcap actions. */
5729         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5730             (queue_index == 0xFFFF ||
5731              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5732                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5733                      MLX5_FLOW_XCAP_ACTIONS)
5734                         return rte_flow_error_set(error, ENOTSUP,
5735                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5736                                                   NULL, "encap and decap "
5737                                                   "combination is not "
5738                                                   "supported");
5739                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5740                                                         MLX5_FLOW_ACTION_ENCAP))
5741                         return rte_flow_error_set(error, ENOTSUP,
5742                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5743                                                   NULL, "encap is not supported"
5744                                                   " for ingress traffic");
5745         }
5746         return 0;
5747 }
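
/*
 * Editor's sketch (not part of the driver): a SAMPLE action shaped the way
 * the validator above expects it for the NIC Rx case -- a non-zero ratio
 * and a sub-action list holding a QUEUE (or RSS) fate terminated by END.
 * Ratio 2 (sample every second packet) and queue 0 are arbitrary values.
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_action_queue example_sample_queue = {
	.index = 0,
};
static const struct rte_flow_action example_sample_sub_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_sample_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
static const struct rte_flow_action_sample example_sample = {
	.ratio = 2,	/* 1 samples every packet, 0 is rejected */
	.actions = example_sample_sub_actions,
};
#endif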
5748
5749 /**
5750  * Find existing modify-header resource or create and register a new one.
5751  *
5752  * @param[in, out] dev
5753  *   Pointer to rte_eth_dev structure.
5754  * @param[in, out] resource
5755  *   Pointer to modify-header resource.
5756  * @param[in, out] dev_flow
5757  *   Pointer to the dev_flow.
5758  * @param[out] error
5759  *   Pointer to error structure.
5760  *
5761  * @return
5762  *   0 on success, otherwise a negative errno value and rte_errno is set.
5763  */
5764 static int
5765 flow_dv_modify_hdr_resource_register
5766                         (struct rte_eth_dev *dev,
5767                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5768                          struct mlx5_flow *dev_flow,
5769                          struct rte_flow_error *error)
5770 {
5771         struct mlx5_priv *priv = dev->data->dev_private;
5772         struct mlx5_dev_ctx_shared *sh = priv->sh;
5773         uint32_t key_len = sizeof(*resource) -
5774                            offsetof(typeof(*resource), ft_type) +
5775                            resource->actions_num * sizeof(resource->actions[0]);
5776         struct mlx5_list_entry *entry;
5777         struct mlx5_flow_cb_ctx ctx = {
5778                 .error = error,
5779                 .data = resource,
5780         };
5781         struct mlx5_hlist *modify_cmds;
5782         uint64_t key64;
5783
5784         modify_cmds = flow_dv_hlist_prepare(sh, &sh->modify_cmds,
5785                                 "hdr_modify",
5786                                 MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
5787                                 true, false, sh,
5788                                 flow_dv_modify_create_cb,
5789                                 flow_dv_modify_match_cb,
5790                                 flow_dv_modify_remove_cb,
5791                                 flow_dv_modify_clone_cb,
5792                                 flow_dv_modify_clone_free_cb);
5793         if (unlikely(!modify_cmds))
5794                 return -rte_errno;
5795         resource->root = !dev_flow->dv.group;
5796         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5797                                                                 resource->root))
5798                 return rte_flow_error_set(error, EOVERFLOW,
5799                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5800                                           "too many modify header items");
5801         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5802         entry = mlx5_hlist_register(modify_cmds, key64, &ctx);
5803         if (!entry)
5804                 return -rte_errno;
5805         resource = container_of(entry, typeof(*resource), entry);
5806         dev_flow->handle->dvh.modify_hdr = resource;
5807         return 0;
5808 }
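
/*
 * Editor's note: the lookup key above deliberately skips the struct head
 * (list entry, action handle, index) and covers only the bytes that
 * identify the resource -- from ft_type to the end of the variable-length
 * command array -- hashed into the 64-bit hlist key. A minimal sketch of
 * the key computation, assuming @p resource is fully filled in:
 */
#if 0 /* illustrative example, not compiled */
uint32_t key_len = sizeof(*resource) -
		   offsetof(typeof(*resource), ft_type) +
		   resource->actions_num * sizeof(resource->actions[0]);
uint64_t key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
#endif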
5809
5810 /**
5811  * Get DV flow counter by index.
5812  *
5813  * @param[in] dev
5814  *   Pointer to the Ethernet device structure.
5815  * @param[in] idx
5816  *   mlx5 flow counter index in the container.
5817  * @param[out] ppool
5818  *   mlx5 flow counter pool in the container.
5819  *
5820  * @return
5821  *   Pointer to the counter, NULL otherwise.
5822  */
5823 static struct mlx5_flow_counter *
5824 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5825                            uint32_t idx,
5826                            struct mlx5_flow_counter_pool **ppool)
5827 {
5828         struct mlx5_priv *priv = dev->data->dev_private;
5829         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5830         struct mlx5_flow_counter_pool *pool;
5831
5832         /* Decrease to original index and clear shared bit. */
5833         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5834         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5835         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5836         MLX5_ASSERT(pool);
5837         if (ppool)
5838                 *ppool = pool;
5839         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5840 }
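
/*
 * Editor's note: a worked example of the index decode above, assuming for
 * illustration that MLX5_COUNTERS_PER_POOL is 512 and
 * MLX5_CNT_SHARED_OFFSET is 0x80000000:
 */
#if 0 /* illustrative example, not compiled */
uint32_t idx = 0x80000403;	/* shared-bit set, 1-based counter index */

idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);	/* -> 0x402 (1026) */
/* pool = cmng->pools[1026 / 512] = pools[2], counter 1026 % 512 = 2. */
#endif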
5841
5842 /**
5843  * Check the devx counter belongs to the pool.
5844  *
5845  * @param[in] pool
5846  *   Pointer to the counter pool.
5847  * @param[in] id
5848  *   The counter devx ID.
5849  *
5850  * @return
5851  *   True if counter belongs to the pool, false otherwise.
5852  */
5853 static bool
5854 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5855 {
5856         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5857                    MLX5_COUNTERS_PER_POOL;
5858
5859         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5860                 return true;
5861         return false;
5862 }
5863
5864 /**
5865  * Get a pool by devx counter ID.
5866  *
5867  * @param[in] cmng
5868  *   Pointer to the counter management.
5869  * @param[in] id
5870  *   The counter devx ID.
5871  *
5872  * @return
5873  *   The counter pool pointer if exists, NULL otherwise,
5874  *   The counter pool pointer if it exists, NULL otherwise.
5875 static struct mlx5_flow_counter_pool *
5876 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5877 {
5878         uint32_t i;
5879         struct mlx5_flow_counter_pool *pool = NULL;
5880
5881         rte_spinlock_lock(&cmng->pool_update_sl);
5882         /* Check last used pool. */
5883         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5884             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5885                 pool = cmng->pools[cmng->last_pool_idx];
5886                 goto out;
5887         }
5888         /* ID out of range means no suitable pool in the container. */
5889         if (id > cmng->max_id || id < cmng->min_id)
5890                 goto out;
5891         /*
5892          * Search the pools from the end of the container, since counter
5893          * IDs mostly increase in sequence, so the last pool is likely
5894          * the needed one.
5895          */
5896         i = cmng->n_valid;
5897         while (i--) {
5898                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5899
5900                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5901                         pool = pool_tmp;
5902                         break;
5903                 }
5904         }
5905 out:
5906         rte_spinlock_unlock(&cmng->pool_update_sl);
5907         return pool;
5908 }
5909
5910 /**
5911  * Resize a counter container.
5912  *
5913  * @param[in] dev
5914  *   Pointer to the Ethernet device structure.
5915  *
5916  * @return
5917  *   0 on success, otherwise negative errno value and rte_errno is set.
5918  */
5919 static int
5920 flow_dv_container_resize(struct rte_eth_dev *dev)
5921 {
5922         struct mlx5_priv *priv = dev->data->dev_private;
5923         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5924         void *old_pools = cmng->pools;
5925         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5926         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5927         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5928
5929         if (!pools) {
5930                 rte_errno = ENOMEM;
5931                 return -ENOMEM;
5932         }
5933         if (old_pools)
5934                 memcpy(pools, old_pools, cmng->n *
5935                                        sizeof(struct mlx5_flow_counter_pool *));
5936         cmng->n = resize;
5937         cmng->pools = pools;
5938         if (old_pools)
5939                 mlx5_free(old_pools);
5940         return 0;
5941 }
5942
5943 /**
5944  * Query a devx flow counter.
5945  *
5946  * @param[in] dev
5947  *   Pointer to the Ethernet device structure.
5948  * @param[in] counter
5949  *   Index to the flow counter.
5950  * @param[out] pkts
5951  *   The statistics value of packets.
5952  * @param[out] bytes
5953  *   The statistics value of bytes.
5954  *
5955  * @return
5956  *   0 on success, otherwise a negative errno value and rte_errno is set.
5957  */
5958 static inline int
5959 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5960                      uint64_t *bytes)
5961 {
5962         struct mlx5_priv *priv = dev->data->dev_private;
5963         struct mlx5_flow_counter_pool *pool = NULL;
5964         struct mlx5_flow_counter *cnt;
5965         int offset;
5966
5967         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5968         MLX5_ASSERT(pool);
5969         if (priv->sh->cmng.counter_fallback)
5970                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5971                                         0, pkts, bytes, 0, NULL, NULL, 0);
5972         rte_spinlock_lock(&pool->sl);
5973         if (!pool->raw) {
5974                 *pkts = 0;
5975                 *bytes = 0;
5976         } else {
5977                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5978                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5979                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5980         }
5981         rte_spinlock_unlock(&pool->sl);
5982         return 0;
5983 }
5984
5985 /**
5986  * Create and initialize a new counter pool.
5987  *
5988  * @param[in] dev
5989  *   Pointer to the Ethernet device structure.
5990  * @param[out] dcs
5991  *   The devX counter handle.
5992  * @param[in] age
5993  *   Whether the pool is for a counter that was allocated for aging.
5994  * @param[in, out] cont_cur
5995  *   Pointer to the container pointer; it will be updated on pool resize.
5996  *
5997  * @return
5998  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
5999  */
6000 static struct mlx5_flow_counter_pool *
6001 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
6002                     uint32_t age)
6003 {
6004         struct mlx5_priv *priv = dev->data->dev_private;
6005         struct mlx5_flow_counter_pool *pool;
6006         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6007         bool fallback = priv->sh->cmng.counter_fallback;
6008         uint32_t size = sizeof(*pool);
6009
6010         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
6011         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
6012         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
6013         if (!pool) {
6014                 rte_errno = ENOMEM;
6015                 return NULL;
6016         }
6017         pool->raw = NULL;
6018         pool->is_aged = !!age;
6019         pool->query_gen = 0;
6020         pool->min_dcs = dcs;
6021         rte_spinlock_init(&pool->sl);
6022         rte_spinlock_init(&pool->csl);
6023         TAILQ_INIT(&pool->counters[0]);
6024         TAILQ_INIT(&pool->counters[1]);
6025         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
6026         rte_spinlock_lock(&cmng->pool_update_sl);
6027         pool->index = cmng->n_valid;
6028         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
6029                 mlx5_free(pool);
6030                 rte_spinlock_unlock(&cmng->pool_update_sl);
6031                 return NULL;
6032         }
6033         cmng->pools[pool->index] = pool;
6034         cmng->n_valid++;
6035         if (unlikely(fallback)) {
6036                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
6037
6038                 if (base < cmng->min_id)
6039                         cmng->min_id = base;
6040                 if (base > cmng->max_id)
6041                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
6042                 cmng->last_pool_idx = pool->index;
6043         }
6044         rte_spinlock_unlock(&cmng->pool_update_sl);
6045         return pool;
6046 }
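
/*
 * Memory layout sketch for the single allocation above, assuming
 * MLX5_COUNTERS_PER_POOL is 512:
 *
 *   +---------------------------------+
 *   | struct mlx5_flow_counter_pool   |  sizeof(*pool) header
 *   +---------------------------------+
 *   | 512 * MLX5_CNT_SIZE             |  counters, MLX5_POOL_GET_CNT(pool, i)
 *   +---------------------------------+
 *   | 512 * MLX5_AGE_SIZE             |  age parameters, only when age != 0
 *   +---------------------------------+
 */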
6047
6048 /**
6049  * Prepare a new counter and/or a new counter pool.
6050  *
6051  * @param[in] dev
6052  *   Pointer to the Ethernet device structure.
6053  * @param[out] cnt_free
6054  *   Where to put the pointer of a new counter.
6055  * @param[in] age
 *   Whether the pool is for counters allocated for aging.
6057  *
6058  * @return
6059  *   The counter pool pointer and @p cnt_free is set on success,
6060  *   NULL otherwise and rte_errno is set.
6061  */
6062 static struct mlx5_flow_counter_pool *
6063 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
6064                              struct mlx5_flow_counter **cnt_free,
6065                              uint32_t age)
6066 {
6067         struct mlx5_priv *priv = dev->data->dev_private;
6068         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6069         struct mlx5_flow_counter_pool *pool;
6070         struct mlx5_counters tmp_tq;
6071         struct mlx5_devx_obj *dcs = NULL;
6072         struct mlx5_flow_counter *cnt;
6073         enum mlx5_counter_type cnt_type =
6074                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6075         bool fallback = priv->sh->cmng.counter_fallback;
6076         uint32_t i;
6077
6078         if (fallback) {
6079                 /* bulk_bitmap must be 0 for single counter allocation. */
6080                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
6081                 if (!dcs)
6082                         return NULL;
6083                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
6084                 if (!pool) {
6085                         pool = flow_dv_pool_create(dev, dcs, age);
6086                         if (!pool) {
6087                                 mlx5_devx_cmd_destroy(dcs);
6088                                 return NULL;
6089                         }
6090                 }
6091                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
6092                 cnt = MLX5_POOL_GET_CNT(pool, i);
6093                 cnt->pool = pool;
6094                 cnt->dcs_when_free = dcs;
6095                 *cnt_free = cnt;
6096                 return pool;
6097         }
6098         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
6099         if (!dcs) {
6100                 rte_errno = ENODATA;
6101                 return NULL;
6102         }
6103         pool = flow_dv_pool_create(dev, dcs, age);
6104         if (!pool) {
6105                 mlx5_devx_cmd_destroy(dcs);
6106                 return NULL;
6107         }
6108         TAILQ_INIT(&tmp_tq);
6109         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6110                 cnt = MLX5_POOL_GET_CNT(pool, i);
6111                 cnt->pool = pool;
6112                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6113         }
6114         rte_spinlock_lock(&cmng->csl[cnt_type]);
6115         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6116         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6117         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6118         (*cnt_free)->pool = pool;
6119         return pool;
6120 }
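
/*
 * Note on the bulk allocation above: the second argument of
 * mlx5_devx_cmd_flow_counter_alloc() is the bulk size in 128-counter
 * units, so 0x4 requests 4 * 128 = 512 counters, matching
 * MLX5_COUNTERS_PER_POOL, all backed by a single DevX object, while the
 * fallback path allocates one counter at a time (bulk 0). Counter 0 of a
 * new pool is handed to the caller directly; the remaining counters are
 * pushed onto the free list of the matching counter type.
 */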
6121
6122 /**
6123  * Allocate a flow counter.
6124  *
6125  * @param[in] dev
6126  *   Pointer to the Ethernet device structure.
6127  * @param[in] age
6128  *   Whether the counter was allocated for aging.
6129  *
6130  * @return
6131  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6132  */
6133 static uint32_t
6134 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6135 {
6136         struct mlx5_priv *priv = dev->data->dev_private;
6137         struct mlx5_flow_counter_pool *pool = NULL;
6138         struct mlx5_flow_counter *cnt_free = NULL;
6139         bool fallback = priv->sh->cmng.counter_fallback;
6140         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6141         enum mlx5_counter_type cnt_type =
6142                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6143         uint32_t cnt_idx;
6144
6145         if (!priv->sh->devx) {
6146                 rte_errno = ENOTSUP;
6147                 return 0;
6148         }
6149         /* Get free counters from container. */
6150         rte_spinlock_lock(&cmng->csl[cnt_type]);
6151         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6152         if (cnt_free)
6153                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6154         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6155         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6156                 goto err;
6157         pool = cnt_free->pool;
6158         if (fallback)
6159                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
        /* Create a DV counter action only on first-time usage. */
6161         if (!cnt_free->action) {
6162                 uint16_t offset;
6163                 struct mlx5_devx_obj *dcs;
6164                 int ret;
6165
6166                 if (!fallback) {
6167                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6168                         dcs = pool->min_dcs;
6169                 } else {
6170                         offset = 0;
6171                         dcs = cnt_free->dcs_when_free;
6172                 }
6173                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6174                                                             &cnt_free->action);
6175                 if (ret) {
6176                         rte_errno = errno;
6177                         goto err;
6178                 }
6179         }
6180         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6181                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6182         /* Update the counter reset values. */
6183         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6184                                  &cnt_free->bytes))
6185                 goto err;
6186         if (!fallback && !priv->sh->cmng.query_thread_on)
6187                 /* Start the asynchronous batch query by the host thread. */
6188                 mlx5_set_query_alarm(priv->sh);
        /*
         * When the count action is not shared (by ID), the shared_info
         * field is used for the indirect action API's reference count.
         * When the counter action is shared neither by ID nor by the
         * indirect action API, the reference count must be 1.
         */
6195         cnt_free->shared_info.refcnt = 1;
6196         return cnt_idx;
6197 err:
6198         if (cnt_free) {
6199                 cnt_free->pool = pool;
6200                 if (fallback)
6201                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6202                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6203                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6204                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6205         }
6206         return 0;
6207 }
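
/*
 * Worked example of the index encoding above, assuming
 * MLX5_MAKE_CNT_IDX(pi, offset) expands to
 * ((pi) * MLX5_COUNTERS_PER_POOL + (offset) + 1) with 512 counters per
 * pool: pool index 2 and array offset 5 encode to 2 * 512 + 5 + 1 = 1030.
 * The "+ 1" keeps index 0 free to mean "no counter"; decoding therefore
 * subtracts 1 first, as in flow_dv_counter_idx_get_age() below.
 */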
6208
6209 /**
6210  * Get age param from counter index.
6211  *
6212  * @param[in] dev
6213  *   Pointer to the Ethernet device structure.
6214  * @param[in] counter
6215  *   Index to the counter handler.
6216  *
6217  * @return
6218  *   The aging parameter specified for the counter index.
6219  */
6220 static struct mlx5_age_param*
6221 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6222                                 uint32_t counter)
6223 {
6224         struct mlx5_flow_counter *cnt;
6225         struct mlx5_flow_counter_pool *pool = NULL;
6226
6227         flow_dv_counter_get_by_idx(dev, counter, &pool);
6228         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6229         cnt = MLX5_POOL_GET_CNT(pool, counter);
6230         return MLX5_CNT_TO_AGE(cnt);
6231 }
6232
6233 /**
 * Remove a flow counter from the aged counter list.
6235  *
6236  * @param[in] dev
6237  *   Pointer to the Ethernet device structure.
6238  * @param[in] counter
6239  *   Index to the counter handler.
6240  * @param[in] cnt
6241  *   Pointer to the counter handler.
6242  */
6243 static void
6244 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6245                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6246 {
6247         struct mlx5_age_info *age_info;
6248         struct mlx5_age_param *age_param;
6249         struct mlx5_priv *priv = dev->data->dev_private;
6250         uint16_t expected = AGE_CANDIDATE;
6251
6252         age_info = GET_PORT_AGE_INFO(priv);
6253         age_param = flow_dv_counter_idx_get_age(dev, counter);
6254         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6255                                          AGE_FREE, false, __ATOMIC_RELAXED,
6256                                          __ATOMIC_RELAXED)) {
                /*
                 * We need the lock even on age timeout,
                 * since the counter may still be in process.
                 */
6261                 rte_spinlock_lock(&age_info->aged_sl);
6262                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6263                 rte_spinlock_unlock(&age_info->aged_sl);
6264                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6265         }
6266 }
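
/*
 * State transition sketch for the release above:
 *
 *   AGE_CANDIDATE --cmpxchg--------> AGE_FREE   (fast path, lockless)
 *   AGE_TMOUT     --under aged_sl--> AGE_FREE   (counter already on the
 *                                                aged list, unlink first)
 */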
6267
6268 /**
6269  * Release a flow counter.
6270  *
6271  * @param[in] dev
6272  *   Pointer to the Ethernet device structure.
6273  * @param[in] counter
6274  *   Index to the counter handler.
6275  */
6276 static void
6277 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6278 {
6279         struct mlx5_priv *priv = dev->data->dev_private;
6280         struct mlx5_flow_counter_pool *pool = NULL;
6281         struct mlx5_flow_counter *cnt;
6282         enum mlx5_counter_type cnt_type;
6283
6284         if (!counter)
6285                 return;
6286         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6287         MLX5_ASSERT(pool);
6288         if (pool->is_aged) {
6289                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6290         } else {
                /*
                 * If the counter action is shared by the indirect action API,
                 * the atomic function reduces its reference counter.
                 * If the action is still referenced after the reduction, the
                 * function returns here and does not release it.
                 * When the counter action is not shared by the indirect
                 * action API, the reference count is 1 before the reduction,
                 * so the condition fails and the function does not return
                 * here.
                 */
6300                 if (__atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6301                                        __ATOMIC_RELAXED))
6302                         return;
6303         }
6304         cnt->pool = pool;
        /*
         * Put the counter back to the list to be updated in non-fallback
         * mode. Two lists are used alternately: while one is being queried,
         * freed counters are added to the other one, selected by the pool
         * query_gen value. After a query finishes, its list is concatenated
         * to the global container counter list. The lists are swapped when
         * a query starts, so no lock is needed here: the query callback and
         * the release function operate on different lists.
         */
6314         if (!priv->sh->cmng.counter_fallback) {
6315                 rte_spinlock_lock(&pool->csl);
6316                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6317                 rte_spinlock_unlock(&pool->csl);
6318         } else {
6319                 cnt->dcs_when_free = cnt->dcs_when_active;
6320                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6321                                            MLX5_COUNTER_TYPE_ORIGIN;
6322                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6323                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6324                                   cnt, next);
6325                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6326         }
6327 }
6328
6329 /**
 * Resize the ASO meter pools container.
6331  *
6332  * @param[in] dev
6333  *   Pointer to the Ethernet device structure.
6334  *
6335  * @return
6336  *   0 on success, otherwise negative errno value and rte_errno is set.
6337  */
6338 static int
6339 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6340 {
6341         struct mlx5_priv *priv = dev->data->dev_private;
6342         struct mlx5_aso_mtr_pools_mng *pools_mng =
6343                                 &priv->sh->mtrmng->pools_mng;
6344         void *old_pools = pools_mng->pools;
6345         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6346         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6347         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6348
6349         if (!pools) {
6350                 rte_errno = ENOMEM;
6351                 return -ENOMEM;
6352         }
        if (!pools_mng->n &&
            mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
                mlx5_free(pools);
                return -ENOMEM;
        }
6358         if (old_pools)
6359                 memcpy(pools, old_pools, pools_mng->n *
6360                                        sizeof(struct mlx5_aso_mtr_pool *));
6361         pools_mng->n = resize;
6362         pools_mng->pools = pools;
6363         if (old_pools)
6364                 mlx5_free(old_pools);
6365         return 0;
6366 }
6367
6368 /**
6369  * Prepare a new meter and/or a new meter pool.
6370  *
6371  * @param[in] dev
6372  *   Pointer to the Ethernet device structure.
6373  * @param[out] mtr_free
 *   Where to put the pointer of a new meter.
 *
 * @return
 *   The meter pool pointer and @p mtr_free is set on success,
6378  *   NULL otherwise and rte_errno is set.
6379  */
6380 static struct mlx5_aso_mtr_pool *
6381 flow_dv_mtr_pool_create(struct rte_eth_dev *dev, struct mlx5_aso_mtr **mtr_free)
6382 {
6383         struct mlx5_priv *priv = dev->data->dev_private;
6384         struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
6385         struct mlx5_aso_mtr_pool *pool = NULL;
6386         struct mlx5_devx_obj *dcs = NULL;
6387         uint32_t i;
6388         uint32_t log_obj_size;
6389
6390         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6391         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
6392                                                       priv->sh->cdev->pdn,
6393                                                       log_obj_size);
6394         if (!dcs) {
6395                 rte_errno = ENODATA;
6396                 return NULL;
6397         }
6398         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6399         if (!pool) {
6400                 rte_errno = ENOMEM;
6401                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6402                 return NULL;
6403         }
6404         pool->devx_obj = dcs;
6405         rte_rwlock_write_lock(&pools_mng->resize_mtrwl);
6406         pool->index = pools_mng->n_valid;
6407         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6408                 mlx5_free(pool);
6409                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6410                 rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6411                 return NULL;
6412         }
6413         pools_mng->pools[pool->index] = pool;
6414         pools_mng->n_valid++;
6415         rte_rwlock_write_unlock(&pools_mng->resize_mtrwl);
6416         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6417                 pool->mtrs[i].offset = i;
6418                 LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
6419         }
6420         pool->mtrs[0].offset = 0;
6421         *mtr_free = &pool->mtrs[0];
6422         return pool;
6423 }
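
/*
 * Note on the object sizing above, assuming MLX5_ASO_MTRS_PER_POOL is 128
 * and each ASO flow meter object hosts two meters (hence the ">> 1"):
 * the pool needs 128 / 2 = 64 objects, so log_obj_size becomes
 * rte_log2_u32(64) = 6. Meter 0 is returned to the caller while the
 * remaining meters are linked into the shared free list.
 */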
6424
6425 /**
 * Release a flow meter back into its pool.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_idx
 *   Index to the ASO flow meter.
6432  */
6433 static void
6434 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6435 {
6436         struct mlx5_priv *priv = dev->data->dev_private;
6437         struct mlx5_aso_mtr_pools_mng *pools_mng =
6438                                 &priv->sh->mtrmng->pools_mng;
6439         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6440
6441         MLX5_ASSERT(aso_mtr);
6442         rte_spinlock_lock(&pools_mng->mtrsl);
6443         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6444         aso_mtr->state = ASO_METER_FREE;
6445         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6446         rte_spinlock_unlock(&pools_mng->mtrsl);
6447 }
6448
6449 /**
 * Allocate an ASO flow meter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6457  */
6458 static uint32_t
6459 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6460 {
6461         struct mlx5_priv *priv = dev->data->dev_private;
6462         struct mlx5_aso_mtr *mtr_free = NULL;
6463         struct mlx5_aso_mtr_pools_mng *pools_mng =
6464                                 &priv->sh->mtrmng->pools_mng;
6465         struct mlx5_aso_mtr_pool *pool;
6466         uint32_t mtr_idx = 0;
6467
6468         if (!priv->sh->devx) {
6469                 rte_errno = ENOTSUP;
6470                 return 0;
6471         }
        /* Get a free meter from the pool management free list. */
6474         rte_spinlock_lock(&pools_mng->mtrsl);
6475         mtr_free = LIST_FIRST(&pools_mng->meters);
6476         if (mtr_free)
6477                 LIST_REMOVE(mtr_free, next);
6478         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6479                 rte_spinlock_unlock(&pools_mng->mtrsl);
6480                 return 0;
6481         }
6482         mtr_free->state = ASO_METER_WAIT;
6483         rte_spinlock_unlock(&pools_mng->mtrsl);
6484         pool = container_of(mtr_free,
6485                         struct mlx5_aso_mtr_pool,
6486                         mtrs[mtr_free->offset]);
6487         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6488         if (!mtr_free->fm.meter_action) {
6489 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6490                 struct rte_flow_error error;
6491                 uint8_t reg_id;
6492
6493                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6494                 mtr_free->fm.meter_action =
6495                         mlx5_glue->dv_create_flow_action_aso
6496                                                 (priv->sh->rx_domain,
6497                                                  pool->devx_obj->obj,
6498                                                  mtr_free->offset,
6499                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6500                                                  reg_id - REG_C_0);
6501 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6502                 if (!mtr_free->fm.meter_action) {
6503                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6504                         return 0;
6505                 }
6506         }
6507         return mtr_idx;
6508 }
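
/*
 * Usage sketch (illustrative only): allocation and release above pair
 * through the encoded index, assuming MLX5_MAKE_MTR_IDX() mirrors the
 * mlx5_aso_meter_by_idx() lookup used by the release path:
 *
 *   uint32_t mtr_idx = flow_dv_mtr_alloc(dev);
 *
 *   if (mtr_idx)
 *       flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
 */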
6509
/**
 * Verify that the @p attributes will be correctly understood by the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, NULL if not a tunnel rule.
 * @param[in] attributes
 *   Pointer to the flow attributes.
 * @param[in] grp_info
 *   Pointer to the group translation info.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   - 0 on success for a non-root table.
 *   - 1 on success for a root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
6528 static int
6529 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6530                             const struct mlx5_flow_tunnel *tunnel,
6531                             const struct rte_flow_attr *attributes,
6532                             const struct flow_grp_info *grp_info,
6533                             struct rte_flow_error *error)
6534 {
6535         struct mlx5_priv *priv = dev->data->dev_private;
6536         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6537         int ret = 0;
6538
6539 #ifndef HAVE_MLX5DV_DR
6540         RTE_SET_USED(tunnel);
6541         RTE_SET_USED(grp_info);
6542         if (attributes->group)
6543                 return rte_flow_error_set(error, ENOTSUP,
6544                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6545                                           NULL,
6546                                           "groups are not supported");
6547 #else
6548         uint32_t table = 0;
6549
6550         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6551                                        grp_info, error);
6552         if (ret)
6553                 return ret;
6554         if (!table)
6555                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6556 #endif
6557         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6558             attributes->priority > lowest_priority)
6559                 return rte_flow_error_set(error, ENOTSUP,
6560                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6561                                           NULL,
6562                                           "priority out of range");
6563         if (attributes->transfer) {
6564                 if (!priv->config.dv_esw_en)
6565                         return rte_flow_error_set
6566                                 (error, ENOTSUP,
6567                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6568                                  "E-Switch dr is not supported");
6569                 if (!(priv->representor || priv->master))
6570                         return rte_flow_error_set
6571                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6572                                  NULL, "E-Switch configuration can only be"
6573                                  " done by a master or a representor device");
6574                 if (attributes->egress)
6575                         return rte_flow_error_set
6576                                 (error, ENOTSUP,
6577                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6578                                  "egress is not supported");
6579         }
6580         if (!(attributes->egress ^ attributes->ingress))
6581                 return rte_flow_error_set(error, ENOTSUP,
6582                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6583                                           "must specify exactly one of "
6584                                           "ingress or egress");
6585         return ret;
6586 }
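
/*
 * Illustrative attributes accepted by the validation above: exactly one
 * direction bit must be set, and transfer rules additionally require
 * E-Switch support on a master or representor port.
 *
 *   struct rte_flow_attr attr = {
 *       .group = 1,
 *       .ingress = 1,
 *   };
 *
 * with .egress and .transfer left at 0.
 */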
6587
6588 static int
6589 validate_integrity_bits(const struct rte_flow_item_integrity *mask,
6590                         int64_t pattern_flags, uint64_t l3_flags,
6591                         uint64_t l4_flags, uint64_t ip4_flag,
6592                         struct rte_flow_error *error)
6593 {
6594         if (mask->l3_ok && !(pattern_flags & l3_flags))
6595                 return rte_flow_error_set(error, EINVAL,
6596                                           RTE_FLOW_ERROR_TYPE_ITEM,
6597                                           NULL, "missing L3 protocol");
6598
6599         if (mask->ipv4_csum_ok && !(pattern_flags & ip4_flag))
6600                 return rte_flow_error_set(error, EINVAL,
6601                                           RTE_FLOW_ERROR_TYPE_ITEM,
6602                                           NULL, "missing IPv4 protocol");
6603
6604         if ((mask->l4_ok || mask->l4_csum_ok) && !(pattern_flags & l4_flags))
6605                 return rte_flow_error_set(error, EINVAL,
6606                                           RTE_FLOW_ERROR_TYPE_ITEM,
6607                                           NULL, "missing L4 protocol");
6608
6609         return 0;
6610 }
6611
6612 static int
6613 flow_dv_validate_item_integrity_post(const struct
6614                                      rte_flow_item *integrity_items[2],
6615                                      int64_t pattern_flags,
6616                                      struct rte_flow_error *error)
6617 {
6618         const struct rte_flow_item_integrity *mask;
6619         int ret;
6620
6621         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
6622                 mask = (typeof(mask))integrity_items[0]->mask;
6623                 ret = validate_integrity_bits(mask, pattern_flags,
6624                                               MLX5_FLOW_LAYER_OUTER_L3,
6625                                               MLX5_FLOW_LAYER_OUTER_L4,
6626                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4,
6627                                               error);
6628                 if (ret)
6629                         return ret;
6630         }
6631         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
6632                 mask = (typeof(mask))integrity_items[1]->mask;
6633                 ret = validate_integrity_bits(mask, pattern_flags,
6634                                               MLX5_FLOW_LAYER_INNER_L3,
6635                                               MLX5_FLOW_LAYER_INNER_L4,
6636                                               MLX5_FLOW_LAYER_INNER_L3_IPV4,
6637                                               error);
6638                 if (ret)
6639                         return ret;
6640         }
6641         return 0;
6642 }
6643
6644 static int
6645 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6646                                 const struct rte_flow_item *integrity_item,
6647                                 uint64_t pattern_flags, uint64_t *last_item,
6648                                 const struct rte_flow_item *integrity_items[2],
6649                                 struct rte_flow_error *error)
6650 {
6651         struct mlx5_priv *priv = dev->data->dev_private;
6652         const struct rte_flow_item_integrity *mask = (typeof(mask))
6653                                                      integrity_item->mask;
6654         const struct rte_flow_item_integrity *spec = (typeof(spec))
6655                                                      integrity_item->spec;
6656
6657         if (!priv->config.hca_attr.pkt_integrity_match)
6658                 return rte_flow_error_set(error, ENOTSUP,
6659                                           RTE_FLOW_ERROR_TYPE_ITEM,
6660                                           integrity_item,
6661                                           "packet integrity integrity_item not supported");
6662         if (!spec)
6663                 return rte_flow_error_set(error, ENOTSUP,
6664                                           RTE_FLOW_ERROR_TYPE_ITEM,
6665                                           integrity_item,
6666                                           "no spec for integrity item");
6667         if (!mask)
6668                 mask = &rte_flow_item_integrity_mask;
6669         if (!mlx5_validate_integrity_item(mask))
6670                 return rte_flow_error_set(error, ENOTSUP,
6671                                           RTE_FLOW_ERROR_TYPE_ITEM,
6672                                           integrity_item,
6673                                           "unsupported integrity filter");
6674         if (spec->level > 1) {
6675                 if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY)
6676                         return rte_flow_error_set
6677                                 (error, ENOTSUP,
6678                                  RTE_FLOW_ERROR_TYPE_ITEM,
6679                                  NULL, "multiple inner integrity items not supported");
6680                 integrity_items[1] = integrity_item;
6681                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
6682         } else {
6683                 if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY)
6684                         return rte_flow_error_set
6685                                 (error, ENOTSUP,
6686                                  RTE_FLOW_ERROR_TYPE_ITEM,
6687                                  NULL, "multiple outer integrity items not supported");
6688                 integrity_items[0] = integrity_item;
6689                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
6690         }
6691         return 0;
6692 }
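
/*
 * Illustrative integrity item accepted above: an outer-level item
 * (spec->level <= 1) requesting L3/L4 checks, which the post-pass
 * flow_dv_validate_item_integrity_post() then matches against the ETH /
 * IPV4 / TCP items present in the same pattern:
 *
 *   struct rte_flow_item_integrity spec = {
 *       .level = 0,
 *       .l3_ok = 1,
 *       .l4_ok = 1,
 *       .ipv4_csum_ok = 1,
 *   };
 */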
6693
6694 static int
6695 flow_dv_validate_item_flex(struct rte_eth_dev *dev,
6696                            const struct rte_flow_item *item,
6697                            uint64_t item_flags,
6698                            uint64_t *last_item,
6699                            bool is_inner,
6700                            struct rte_flow_error *error)
6701 {
6702         const struct rte_flow_item_flex *flow_spec = item->spec;
6703         const struct rte_flow_item_flex *flow_mask = item->mask;
6704         struct mlx5_flex_item *flex;
6705
6706         if (!flow_spec)
6707                 return rte_flow_error_set(error, EINVAL,
6708                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6709                                           "flex flow item spec cannot be NULL");
6710         if (!flow_mask)
6711                 return rte_flow_error_set(error, EINVAL,
6712                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6713                                           "flex flow item mask cannot be NULL");
6714         if (item->last)
6715                 return rte_flow_error_set(error, ENOTSUP,
6716                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6717                                           "flex flow item last not supported");
6718         if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
6719                 return rte_flow_error_set(error, EINVAL,
6720                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6721                                           "invalid flex flow item handle");
6722         flex = (struct mlx5_flex_item *)flow_spec->handle;
        switch (flex->tunnel_mode) {
        case FLEX_TUNNEL_MODE_SINGLE:
                if (item_flags &
                    (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "multiple flex items not supported");
                break;
        case FLEX_TUNNEL_MODE_OUTER:
                if (is_inner)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "inner flex item was not configured");
                if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
                        return rte_flow_error_set
                                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "multiple flex items not supported");
                break;
        case FLEX_TUNNEL_MODE_INNER:
                if (!is_inner)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "outer flex item was not configured");
                if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "multiple flex items not supported");
                break;
        case FLEX_TUNNEL_MODE_MULTI:
                if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
                    (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)))
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "multiple flex items not supported");
                break;
        case FLEX_TUNNEL_MODE_TUNNEL:
                if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                 "multiple flex tunnel items not supported");
                break;
        default:
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "invalid flex item configuration");
        }
6770         *last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
6771                      MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
6772                      MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
6773         return 0;
6774 }
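
/*
 * Illustrative flex item accepted above (flex_handle and spec_bytes are
 * hypothetical, the handle coming from rte_flow_flex_item_create()):
 *
 *   struct rte_flow_item_flex flex_spec = {
 *       .handle = flex_handle,
 *       .length = 4,
 *       .pattern = spec_bytes,
 *   };
 *
 * Both spec and mask must be supplied and item->last is not supported.
 */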
6775
6776 /**
6777  * Internal validation function. For validating both actions and items.
6778  *
6779  * @param[in] dev
6780  *   Pointer to the rte_eth_dev structure.
6781  * @param[in] attr
6782  *   Pointer to the flow attributes.
6783  * @param[in] items
6784  *   Pointer to the list of items.
6785  * @param[in] actions
6786  *   Pointer to the list of actions.
6787  * @param[in] external
6788  *   This flow rule is created by request external to PMD.
6789  * @param[in] hairpin
6790  *   Number of hairpin TX actions, 0 means classic flow.
6791  * @param[out] error
6792  *   Pointer to the error structure.
6793  *
6794  * @return
6795  *   0 on success, a negative errno value otherwise and rte_errno is set.
6796  */
6797 static int
6798 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6799                  const struct rte_flow_item items[],
6800                  const struct rte_flow_action actions[],
6801                  bool external, int hairpin, struct rte_flow_error *error)
6802 {
6803         int ret;
6804         uint64_t action_flags = 0;
6805         uint64_t item_flags = 0;
6806         uint64_t last_item = 0;
6807         uint8_t next_protocol = 0xff;
6808         uint16_t ether_type = 0;
6809         int actions_n = 0;
6810         uint8_t item_ipv6_proto = 0;
6811         int fdb_mirror_limit = 0;
6812         int modify_after_mirror = 0;
6813         const struct rte_flow_item *geneve_item = NULL;
6814         const struct rte_flow_item *gre_item = NULL;
6815         const struct rte_flow_item *gtp_item = NULL;
6816         const struct rte_flow_action_raw_decap *decap;
6817         const struct rte_flow_action_raw_encap *encap;
6818         const struct rte_flow_action_rss *rss = NULL;
6819         const struct rte_flow_action_rss *sample_rss = NULL;
6820         const struct rte_flow_action_count *sample_count = NULL;
6821         const struct rte_flow_item_tcp nic_tcp_mask = {
6822                 .hdr = {
6823                         .tcp_flags = 0xFF,
6824                         .src_port = RTE_BE16(UINT16_MAX),
6825                         .dst_port = RTE_BE16(UINT16_MAX),
6826                 }
6827         };
6828         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6829                 .hdr = {
6830                         .src_addr =
6831                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6832                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6833                         .dst_addr =
6834                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6835                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6836                         .vtc_flow = RTE_BE32(0xffffffff),
6837                         .proto = 0xff,
6838                         .hop_limits = 0xff,
6839                 },
6840                 .has_frag_ext = 1,
6841         };
6842         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6843                 .hdr = {
6844                         .common = {
6845                                 .u32 =
6846                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6847                                         .type = 0xFF,
6848                                         }).u32),
6849                         },
6850                         .dummy[0] = 0xffffffff,
6851                 },
6852         };
6853         struct mlx5_priv *priv = dev->data->dev_private;
6854         struct mlx5_dev_config *dev_conf = &priv->config;
6855         uint16_t queue_index = 0xFFFF;
6856         const struct rte_flow_item_vlan *vlan_m = NULL;
6857         uint32_t rw_act_num = 0;
6858         uint64_t is_root;
6859         const struct mlx5_flow_tunnel *tunnel;
6860         enum mlx5_tof_rule_type tof_rule_type;
6861         struct flow_grp_info grp_info = {
6862                 .external = !!external,
6863                 .transfer = !!attr->transfer,
6864                 .fdb_def_rule = !!priv->fdb_def_rule,
6865                 .std_tbl_fix = true,
6866         };
6867         const struct rte_eth_hairpin_conf *conf;
6868         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
6869         const struct rte_flow_item *port_id_item = NULL;
6870         bool def_policy = false;
6871         uint16_t udp_dport = 0;
6872
6873         if (items == NULL)
6874                 return -1;
6875         tunnel = is_tunnel_offload_active(dev) ?
6876                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6877         if (tunnel) {
6878                 if (!priv->config.dv_flow_en)
6879                         return rte_flow_error_set
6880                                 (error, ENOTSUP,
6881                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6882                                  NULL, "tunnel offload requires DV flow interface");
6883                 if (priv->representor)
6884                         return rte_flow_error_set
6885                                 (error, ENOTSUP,
6886                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6887                                  NULL, "decap not supported for VF representor");
6888                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6889                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6890                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6891                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6892                                         MLX5_FLOW_ACTION_DECAP;
6893                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6894                                         (dev, attr, tunnel, tof_rule_type);
6895         }
6896         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6897         if (ret < 0)
6898                 return ret;
6899         is_root = (uint64_t)ret;
6900         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6901                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6902                 int type = items->type;
6903
6904                 if (!mlx5_flow_os_item_supported(type))
6905                         return rte_flow_error_set(error, ENOTSUP,
6906                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6907                                                   NULL, "item not supported");
6908                 switch (type) {
6909                 case RTE_FLOW_ITEM_TYPE_VOID:
6910                         break;
6911                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6912                         ret = flow_dv_validate_item_port_id
6913                                         (dev, items, attr, item_flags, error);
6914                         if (ret < 0)
6915                                 return ret;
6916                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6917                         port_id_item = items;
6918                         break;
6919                 case RTE_FLOW_ITEM_TYPE_ETH:
6920                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6921                                                           true, error);
6922                         if (ret < 0)
6923                                 return ret;
6924                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6925                                              MLX5_FLOW_LAYER_OUTER_L2;
6926                         if (items->mask != NULL && items->spec != NULL) {
6927                                 ether_type =
6928                                         ((const struct rte_flow_item_eth *)
6929                                          items->spec)->type;
6930                                 ether_type &=
6931                                         ((const struct rte_flow_item_eth *)
6932                                          items->mask)->type;
6933                                 ether_type = rte_be_to_cpu_16(ether_type);
6934                         } else {
6935                                 ether_type = 0;
6936                         }
6937                         break;
6938                 case RTE_FLOW_ITEM_TYPE_VLAN:
6939                         ret = flow_dv_validate_item_vlan(items, item_flags,
6940                                                          dev, error);
6941                         if (ret < 0)
6942                                 return ret;
6943                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6944                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6945                         if (items->mask != NULL && items->spec != NULL) {
6946                                 ether_type =
6947                                         ((const struct rte_flow_item_vlan *)
6948                                          items->spec)->inner_type;
6949                                 ether_type &=
6950                                         ((const struct rte_flow_item_vlan *)
6951                                          items->mask)->inner_type;
6952                                 ether_type = rte_be_to_cpu_16(ether_type);
6953                         } else {
6954                                 ether_type = 0;
6955                         }
6956                         /* Store outer VLAN mask for of_push_vlan action. */
6957                         if (!tunnel)
6958                                 vlan_m = items->mask;
6959                         break;
6960                 case RTE_FLOW_ITEM_TYPE_IPV4:
6961                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6962                                                   &item_flags, &tunnel);
6963                         ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
6964                                                          last_item, ether_type,
6965                                                          error);
6966                         if (ret < 0)
6967                                 return ret;
6968                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6969                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6970                         if (items->mask != NULL &&
6971                             ((const struct rte_flow_item_ipv4 *)
6972                              items->mask)->hdr.next_proto_id) {
6973                                 next_protocol =
6974                                         ((const struct rte_flow_item_ipv4 *)
6975                                          (items->spec))->hdr.next_proto_id;
6976                                 next_protocol &=
6977                                         ((const struct rte_flow_item_ipv4 *)
6978                                          (items->mask))->hdr.next_proto_id;
6979                         } else {
6980                                 /* Reset for inner layer. */
6981                                 next_protocol = 0xff;
6982                         }
6983                         break;
6984                 case RTE_FLOW_ITEM_TYPE_IPV6:
6985                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6986                                                   &item_flags, &tunnel);
6987                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6988                                                            last_item,
6989                                                            ether_type,
6990                                                            &nic_ipv6_mask,
6991                                                            error);
6992                         if (ret < 0)
6993                                 return ret;
6994                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6995                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6996                         if (items->mask != NULL &&
6997                             ((const struct rte_flow_item_ipv6 *)
6998                              items->mask)->hdr.proto) {
6999                                 item_ipv6_proto =
7000                                         ((const struct rte_flow_item_ipv6 *)
7001                                          items->spec)->hdr.proto;
7002                                 next_protocol =
7003                                         ((const struct rte_flow_item_ipv6 *)
7004                                          items->spec)->hdr.proto;
7005                                 next_protocol &=
7006                                         ((const struct rte_flow_item_ipv6 *)
7007                                          items->mask)->hdr.proto;
7008                         } else {
7009                                 /* Reset for inner layer. */
7010                                 next_protocol = 0xff;
7011                         }
7012                         break;
7013                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
7014                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
7015                                                                   item_flags,
7016                                                                   error);
7017                         if (ret < 0)
7018                                 return ret;
7019                         last_item = tunnel ?
7020                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
7021                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
7022                         if (items->mask != NULL &&
7023                             ((const struct rte_flow_item_ipv6_frag_ext *)
7024                              items->mask)->hdr.next_header) {
7025                                 next_protocol =
7026                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7027                                  items->spec)->hdr.next_header;
7028                                 next_protocol &=
7029                                 ((const struct rte_flow_item_ipv6_frag_ext *)
7030                                  items->mask)->hdr.next_header;
7031                         } else {
7032                                 /* Reset for inner layer. */
7033                                 next_protocol = 0xff;
7034                         }
7035                         break;
7036                 case RTE_FLOW_ITEM_TYPE_TCP:
7037                         ret = mlx5_flow_validate_item_tcp
7038                                                 (items, item_flags,
7039                                                  next_protocol,
7040                                                  &nic_tcp_mask,
7041                                                  error);
7042                         if (ret < 0)
7043                                 return ret;
7044                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7045                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7046                         break;
                case RTE_FLOW_ITEM_TYPE_UDP: {
                        const struct rte_flow_item_udp *spec = items->spec;
                        const struct rte_flow_item_udp *mask = items->mask;

                        ret = mlx5_flow_validate_item_udp(items, item_flags,
                                                          next_protocol,
                                                          error);
                        if (ret < 0)
                                return ret;
                        if (!mask)
                                mask = &rte_flow_item_udp_mask;
                        if (spec != NULL)
                                udp_dport = rte_be_to_cpu_16
                                                (spec->hdr.dst_port &
                                                 mask->hdr.dst_port);
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
                                             MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                }
7064                 case RTE_FLOW_ITEM_TYPE_GRE:
7065                         ret = mlx5_flow_validate_item_gre(items, item_flags,
7066                                                           next_protocol, error);
7067                         if (ret < 0)
7068                                 return ret;
7069                         gre_item = items;
7070                         last_item = MLX5_FLOW_LAYER_GRE;
7071                         break;
7072                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7073                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
7074                                                             next_protocol,
7075                                                             error);
7076                         if (ret < 0)
7077                                 return ret;
7078                         last_item = MLX5_FLOW_LAYER_NVGRE;
7079                         break;
7080                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7081                         ret = mlx5_flow_validate_item_gre_key
7082                                 (items, item_flags, gre_item, error);
7083                         if (ret < 0)
7084                                 return ret;
7085                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7086                         break;
7087                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7088                         ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
7089                                                             items, item_flags,
7090                                                             attr, error);
7091                         if (ret < 0)
7092                                 return ret;
7093                         last_item = MLX5_FLOW_LAYER_VXLAN;
7094                         break;
7095                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7096                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7097                                                                 item_flags, dev,
7098                                                                 error);
7099                         if (ret < 0)
7100                                 return ret;
7101                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7102                         break;
7103                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7104                         ret = mlx5_flow_validate_item_geneve(items,
7105                                                              item_flags, dev,
7106                                                              error);
7107                         if (ret < 0)
7108                                 return ret;
7109                         geneve_item = items;
7110                         last_item = MLX5_FLOW_LAYER_GENEVE;
7111                         break;
7112                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7113                         ret = mlx5_flow_validate_item_geneve_opt(items,
7114                                                                  last_item,
7115                                                                  geneve_item,
7116                                                                  dev,
7117                                                                  error);
7118                         if (ret < 0)
7119                                 return ret;
7120                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7121                         break;
7122                 case RTE_FLOW_ITEM_TYPE_MPLS:
7123                         ret = mlx5_flow_validate_item_mpls(dev, items,
7124                                                            item_flags,
7125                                                            last_item, error);
7126                         if (ret < 0)
7127                                 return ret;
7128                         last_item = MLX5_FLOW_LAYER_MPLS;
7129                         break;
7130
7131                 case RTE_FLOW_ITEM_TYPE_MARK:
7132                         ret = flow_dv_validate_item_mark(dev, items, attr,
7133                                                          error);
7134                         if (ret < 0)
7135                                 return ret;
7136                         last_item = MLX5_FLOW_ITEM_MARK;
7137                         break;
7138                 case RTE_FLOW_ITEM_TYPE_META:
7139                         ret = flow_dv_validate_item_meta(dev, items, attr,
7140                                                          error);
7141                         if (ret < 0)
7142                                 return ret;
7143                         last_item = MLX5_FLOW_ITEM_METADATA;
7144                         break;
7145                 case RTE_FLOW_ITEM_TYPE_ICMP:
7146                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7147                                                            next_protocol,
7148                                                            error);
7149                         if (ret < 0)
7150                                 return ret;
7151                         last_item = MLX5_FLOW_LAYER_ICMP;
7152                         break;
7153                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7154                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7155                                                             next_protocol,
7156                                                             error);
7157                         if (ret < 0)
7158                                 return ret;
7159                         item_ipv6_proto = IPPROTO_ICMPV6;
7160                         last_item = MLX5_FLOW_LAYER_ICMP6;
7161                         break;
7162                 case RTE_FLOW_ITEM_TYPE_TAG:
7163                         ret = flow_dv_validate_item_tag(dev, items,
7164                                                         attr, error);
7165                         if (ret < 0)
7166                                 return ret;
7167                         last_item = MLX5_FLOW_ITEM_TAG;
7168                         break;
7169                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7170                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7171                         break;
7172                 case RTE_FLOW_ITEM_TYPE_GTP:
7173                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7174                                                         error);
7175                         if (ret < 0)
7176                                 return ret;
7177                         gtp_item = items;
7178                         last_item = MLX5_FLOW_LAYER_GTP;
7179                         break;
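                /* GTP PSC validation needs the preceding GTP item. */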
7180                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7181                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7182                                                             gtp_item, attr,
7183                                                             error);
7184                         if (ret < 0)
7185                                 return ret;
7186                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7187                         break;
7188                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7189                         /* Capacity will be checked in the translate stage. */
7190                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7191                                                             last_item,
7192                                                             ether_type,
7193                                                             &nic_ecpri_mask,
7194                                                             error);
7195                         if (ret < 0)
7196                                 return ret;
7197                         last_item = MLX5_FLOW_LAYER_ECPRI;
7198                         break;
7199                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7200                         ret = flow_dv_validate_item_integrity(dev, items,
7201                                                               item_flags,
7202                                                               &last_item,
7203                                                               integrity_items,
7204                                                               error);
7205                         if (ret < 0)
7206                                 return ret;
7207                         break;
7208                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7209                         ret = flow_dv_validate_item_aso_ct(dev, items,
7210                                                            &item_flags, error);
7211                         if (ret < 0)
7212                                 return ret;
7213                         break;
7214                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7215                         /* Tunnel offload item was processed before;
7216                          * list it here as a supported type.
7217                          */
7218                         break;
7219                 case RTE_FLOW_ITEM_TYPE_FLEX:
7220                         ret = flow_dv_validate_item_flex(dev, items, item_flags,
7221                                                          &last_item,
7222                                                          tunnel != 0, error);
7223                         if (ret < 0)
7224                                 return ret;
7225                         break;
7226                 default:
7227                         return rte_flow_error_set(error, ENOTSUP,
7228                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7229                                                   NULL, "item not supported");
7230                 }
7231                 item_flags |= last_item;
7232         }
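        /* Cross-item checks that need the fully accumulated item_flags. */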
7233         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
7234                 ret = flow_dv_validate_item_integrity_post(integrity_items,
7235                                                            item_flags, error);
7236                 if (ret)
7237                         return ret;
7238         }
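        /*
         * Walk the action list: validate each action, accumulate action_flags,
         * and count both device actions (actions_n) and modify-header
         * sub-actions (rw_act_num).
         */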
7239         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7240                 int type = actions->type;
7241                 bool shared_count = false;
7242
7243                 if (!mlx5_flow_os_action_supported(type))
7244                         return rte_flow_error_set(error, ENOTSUP,
7245                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7246                                                   actions,
7247                                                   "action not supported");
7248                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7249                         return rte_flow_error_set(error, ENOTSUP,
7250                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7251                                                   actions, "too many actions");
7252                 if (action_flags &
7253                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7254                         return rte_flow_error_set(error, ENOTSUP,
7255                                 RTE_FLOW_ERROR_TYPE_ACTION,
7256                                 NULL, "meter action with policy "
7257                                 "must be the last action");
7258                 switch (type) {
7259                 case RTE_FLOW_ACTION_TYPE_VOID:
7260                         break;
7261                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7262                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7263                         ret = flow_dv_validate_action_port_id(dev,
7264                                                               action_flags,
7265                                                               actions,
7266                                                               attr,
7267                                                               error);
7268                         if (ret)
7269                                 return ret;
7270                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7271                         ++actions_n;
7272                         break;
7273                 case RTE_FLOW_ACTION_TYPE_FLAG:
7274                         ret = flow_dv_validate_action_flag(dev, action_flags,
7275                                                            attr, error);
7276                         if (ret < 0)
7277                                 return ret;
7278                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7279                                 /* Count all modify-header actions as one. */
7280                                 if (!(action_flags &
7281                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7282                                         ++actions_n;
7283                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7284                                                 MLX5_FLOW_ACTION_MARK_EXT;
7285                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7286                                         modify_after_mirror = 1;
7288                         } else {
7289                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7290                                 ++actions_n;
7291                         }
7292                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7293                         break;
7294                 case RTE_FLOW_ACTION_TYPE_MARK:
7295                         ret = flow_dv_validate_action_mark(dev, actions,
7296                                                            action_flags,
7297                                                            attr, error);
7298                         if (ret < 0)
7299                                 return ret;
7300                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7301                                 /* Count all modify-header actions as one. */
7302                                 if (!(action_flags &
7303                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7304                                         ++actions_n;
7305                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7306                                                 MLX5_FLOW_ACTION_MARK_EXT;
7307                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7308                                         modify_after_mirror = 1;
7309                         } else {
7310                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7311                                 ++actions_n;
7312                         }
7313                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7314                         break;
7315                 case RTE_FLOW_ACTION_TYPE_SET_META:
7316                         ret = flow_dv_validate_action_set_meta(dev, actions,
7317                                                                action_flags,
7318                                                                attr, error);
7319                         if (ret < 0)
7320                                 return ret;
7321                         /* Count all modify-header actions as one action. */
7322                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7323                                 ++actions_n;
7324                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7325                                 modify_after_mirror = 1;
7326                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7327                         rw_act_num += MLX5_ACT_NUM_SET_META;
7328                         break;
7329                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7330                         ret = flow_dv_validate_action_set_tag(dev, actions,
7331                                                               action_flags,
7332                                                               attr, error);
7333                         if (ret < 0)
7334                                 return ret;
7335                         /* Count all modify-header actions as one action. */
7336                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7337                                 ++actions_n;
7338                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7339                                 modify_after_mirror = 1;
7340                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7341                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7342                         break;
7343                 case RTE_FLOW_ACTION_TYPE_DROP:
7344                         ret = mlx5_flow_validate_action_drop(action_flags,
7345                                                              attr, error);
7346                         if (ret < 0)
7347                                 return ret;
7348                         action_flags |= MLX5_FLOW_ACTION_DROP;
7349                         ++actions_n;
7350                         break;
7351                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7352                         ret = mlx5_flow_validate_action_queue(actions,
7353                                                               action_flags, dev,
7354                                                               attr, error);
7355                         if (ret < 0)
7356                                 return ret;
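                        /* Remember the queue for the hairpin checks below. */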
7357                         queue_index = ((const struct rte_flow_action_queue *)
7358                                                         (actions->conf))->index;
7359                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7360                         ++actions_n;
7361                         break;
7362                 case RTE_FLOW_ACTION_TYPE_RSS:
7363                         rss = actions->conf;
7364                         ret = mlx5_flow_validate_action_rss(actions,
7365                                                             action_flags, dev,
7366                                                             attr, item_flags,
7367                                                             error);
7368                         if (ret < 0)
7369                                 return ret;
7370                         if (rss && sample_rss &&
7371                             (sample_rss->level != rss->level ||
7372                             sample_rss->types != rss->types))
7373                                 return rte_flow_error_set(error, ENOTSUP,
7374                                         RTE_FLOW_ERROR_TYPE_ACTION,
7375                                         NULL,
7376                                         "Cannot use different RSS types "
7377                                         "or levels in the same flow");
7378                         if (rss != NULL && rss->queue_num)
7379                                 queue_index = rss->queue[0];
7380                         action_flags |= MLX5_FLOW_ACTION_RSS;
7381                         ++actions_n;
7382                         break;
7383                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7384                         ret =
7385                         mlx5_flow_validate_action_default_miss(action_flags,
7386                                         attr, error);
7387                         if (ret < 0)
7388                                 return ret;
7389                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7390                         ++actions_n;
7391                         break;
7392                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7393                         shared_count = true;
7394                         /* fall-through. */
7395                 case RTE_FLOW_ACTION_TYPE_COUNT:
7396                         ret = flow_dv_validate_action_count(dev, shared_count,
7397                                                             action_flags,
7398                                                             error);
7399                         if (ret < 0)
7400                                 return ret;
7401                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7402                         ++actions_n;
7403                         break;
7404                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7405                         if (flow_dv_validate_action_pop_vlan(dev,
7406                                                              action_flags,
7407                                                              actions,
7408                                                              item_flags, attr,
7409                                                              error))
7410                                 return -rte_errno;
7411                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7412                                 modify_after_mirror = 1;
7413                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7414                         ++actions_n;
7415                         break;
7416                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7417                         ret = flow_dv_validate_action_push_vlan(dev,
7418                                                                 action_flags,
7419                                                                 vlan_m,
7420                                                                 actions, attr,
7421                                                                 error);
7422                         if (ret < 0)
7423                                 return ret;
7424                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7425                                 modify_after_mirror = 1;
7426                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7427                         ++actions_n;
7428                         break;
7429                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7430                         ret = flow_dv_validate_action_set_vlan_pcp
7431                                                 (action_flags, actions, error);
7432                         if (ret < 0)
7433                                 return ret;
7434                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7435                                 modify_after_mirror = 1;
7436                         /* Count PCP with push_vlan command. */
7437                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7438                         break;
7439                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7440                         ret = flow_dv_validate_action_set_vlan_vid
7441                                                 (item_flags, action_flags,
7442                                                  actions, error);
7443                         if (ret < 0)
7444                                 return ret;
7445                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7446                                 modify_after_mirror = 1;
7447                         /* Count VID with push_vlan command. */
7448                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7449                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7450                         break;
7451                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7452                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7453                         ret = flow_dv_validate_action_l2_encap(dev,
7454                                                                action_flags,
7455                                                                actions, attr,
7456                                                                error);
7457                         if (ret < 0)
7458                                 return ret;
7459                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7460                         ++actions_n;
7461                         break;
7462                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7463                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7464                         ret = flow_dv_validate_action_decap(dev, action_flags,
7465                                                             actions, item_flags,
7466                                                             attr, error);
7467                         if (ret < 0)
7468                                 return ret;
7469                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7470                                 modify_after_mirror = 1;
7471                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7472                         ++actions_n;
7473                         break;
7474                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7475                         ret = flow_dv_validate_action_raw_encap_decap
7476                                 (dev, NULL, actions->conf, attr, &action_flags,
7477                                  &actions_n, actions, item_flags, error);
7478                         if (ret < 0)
7479                                 return ret;
7480                         break;
7481                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7482                         decap = actions->conf;
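                        /*
                         * Look ahead, skipping VOID actions: a raw decap
                         * immediately followed by a raw encap is validated
                         * as one decap/encap pair.
                         */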
7483                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7484                                 ;
7485                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7486                                 encap = NULL;
7487                                 actions--;
7488                         } else {
7489                                 encap = actions->conf;
7490                         }
7491                         ret = flow_dv_validate_action_raw_encap_decap
7492                                            (dev,
7493                                             decap ? decap : &empty_decap, encap,
7494                                             attr, &action_flags, &actions_n,
7495                                             actions, item_flags, error);
7496                         if (ret < 0)
7497                                 return ret;
7498                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7499                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7500                                 modify_after_mirror = 1;
7501                         break;
7502                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7503                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7504                         ret = flow_dv_validate_action_modify_mac(action_flags,
7505                                                                  actions,
7506                                                                  item_flags,
7507                                                                  error);
7508                         if (ret < 0)
7509                                 return ret;
7510                         /* Count all modify-header actions as one action. */
7511                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7512                                 ++actions_n;
7513                         action_flags |= actions->type ==
7514                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7515                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7516                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7517                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7518                                 modify_after_mirror = 1;
7519                         /*
7520                          * Even though the source and destination MAC
7521                          * addresses overlap in the header with 4-byte
7522                          * alignment, the convert function handles them
7523                          * separately, creating four SW actions in total:
7524                          * two actions are added per address, no matter how
7525                          * many bytes of it are set.
7526                          */
7526                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7527                         break;
7528                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7529                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7530                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7531                                                                   actions,
7532                                                                   item_flags,
7533                                                                   error);
7534                         if (ret < 0)
7535                                 return ret;
7536                         /* Count all modify-header actions as one action. */
7537                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7538                                 ++actions_n;
7539                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7540                                 modify_after_mirror = 1;
7541                         action_flags |= actions->type ==
7542                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7543                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7544                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7545                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7546                         break;
7547                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7548                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7549                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7550                                                                   actions,
7551                                                                   item_flags,
7552                                                                   error);
7553                         if (ret < 0)
7554                                 return ret;
7555                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7556                                 return rte_flow_error_set(error, ENOTSUP,
7557                                         RTE_FLOW_ERROR_TYPE_ACTION,
7558                                         actions,
7559                                         "Cannot change the header "
7560                                         "when ICMPv6 proto is matched");
7561                         /* Count all modify-header actions as one action. */
7562                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7563                                 ++actions_n;
7564                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7565                                 modify_after_mirror = 1;
7566                         action_flags |= actions->type ==
7567                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7568                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7569                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7570                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7571                         break;
7572                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7573                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7574                         ret = flow_dv_validate_action_modify_tp(action_flags,
7575                                                                 actions,
7576                                                                 item_flags,
7577                                                                 error);
7578                         if (ret < 0)
7579                                 return ret;
7580                         /* Count all modify-header actions as one action. */
7581                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7582                                 ++actions_n;
7583                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7584                                 modify_after_mirror = 1;
7585                         action_flags |= actions->type ==
7586                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7587                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7588                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7589                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7590                         break;
7591                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7592                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7593                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7594                                                                  actions,
7595                                                                  item_flags,
7596                                                                  error);
7597                         if (ret < 0)
7598                                 return ret;
7599                         /* Count all modify-header actions as one action. */
7600                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7601                                 ++actions_n;
7602                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7603                                 modify_after_mirror = 1;
7604                         action_flags |= actions->type ==
7605                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7606                                                 MLX5_FLOW_ACTION_SET_TTL :
7607                                                 MLX5_FLOW_ACTION_DEC_TTL;
7608                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7609                         break;
7610                 case RTE_FLOW_ACTION_TYPE_JUMP:
7611                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7612                                                            action_flags,
7613                                                            attr, external,
7614                                                            error);
7615                         if (ret)
7616                                 return ret;
7617                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7618                             fdb_mirror_limit)
7619                                 return rte_flow_error_set(error, EINVAL,
7620                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7621                                                   NULL,
7622                                                   "sample and jump action combination is not supported");
7623                         ++actions_n;
7624                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7625                         break;
7626                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7627                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7628                         ret = flow_dv_validate_action_modify_tcp_seq
7629                                                                 (action_flags,
7630                                                                  actions,
7631                                                                  item_flags,
7632                                                                  error);
7633                         if (ret < 0)
7634                                 return ret;
7635                         /* Count all modify-header actions as one action. */
7636                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7637                                 ++actions_n;
7638                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7639                                 modify_after_mirror = 1;
7640                         action_flags |= actions->type ==
7641                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7642                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7643                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7644                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7645                         break;
7646                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7647                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7648                         ret = flow_dv_validate_action_modify_tcp_ack
7649                                                                 (action_flags,
7650                                                                  actions,
7651                                                                  item_flags,
7652                                                                  error);
7653                         if (ret < 0)
7654                                 return ret;
7655                         /* Count all modify-header actions as one action. */
7656                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7657                                 ++actions_n;
7658                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7659                                 modify_after_mirror = 1;
7660                         action_flags |= actions->type ==
7661                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7662                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7663                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7664                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7665                         break;
7666                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7667                         break;
7668                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7669                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
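                        /* Internal PMD actions need no validation here, only
                         * modify-header budget accounting.
                         */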
7670                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7671                         break;
7672                 case RTE_FLOW_ACTION_TYPE_METER:
7673                         ret = mlx5_flow_validate_action_meter(dev,
7674                                                               action_flags,
7675                                                               actions, attr,
7676                                                               port_id_item,
7677                                                               &def_policy,
7678                                                               error);
7679                         if (ret < 0)
7680                                 return ret;
7681                         action_flags |= MLX5_FLOW_ACTION_METER;
7682                         if (!def_policy)
7683                                 action_flags |=
7684                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7685                         ++actions_n;
7686                         /* Meter action will add one more TAG action. */
7687                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7688                         break;
7689                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7690                         if (!attr->transfer && !attr->group)
7691                                 return rte_flow_error_set(error, ENOTSUP,
7692                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7693                                         NULL, "Shared ASO age action is"
7694                                         " not supported for group 0");
7695                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7696                                 return rte_flow_error_set
7697                                                   (error, EINVAL,
7698                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7699                                                    NULL,
7700                                                    "duplicate age actions set");
7701                         action_flags |= MLX5_FLOW_ACTION_AGE;
7702                         ++actions_n;
7703                         break;
7704                 case RTE_FLOW_ACTION_TYPE_AGE:
7705                         ret = flow_dv_validate_action_age(action_flags,
7706                                                           actions, dev,
7707                                                           error);
7708                         if (ret < 0)
7709                                 return ret;
7710                         /*
7711                          * The regular AGE action (using a counter) is
7712                          * mutually exclusive with shared counter actions.
7713                          */
7714                         if (!priv->sh->flow_hit_aso_en) {
7715                                 if (shared_count)
7716                                         return rte_flow_error_set
7717                                                 (error, EINVAL,
7718                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7719                                                 NULL,
7720                                                 "old age and shared count combination is not supported");
7721                                 if (sample_count)
7722                                         return rte_flow_error_set
7723                                                 (error, EINVAL,
7724                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7725                                                 NULL,
7726                                                 "old age action and count must be in the same sub flow");
7727                         }
7728                         action_flags |= MLX5_FLOW_ACTION_AGE;
7729                         ++actions_n;
7730                         break;
7731                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7732                         ret = flow_dv_validate_action_modify_ipv4_dscp
7733                                                          (action_flags,
7734                                                           actions,
7735                                                           item_flags,
7736                                                           error);
7737                         if (ret < 0)
7738                                 return ret;
7739                         /* Count all modify-header actions as one action. */
7740                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7741                                 ++actions_n;
7742                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7743                                 modify_after_mirror = 1;
7744                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7745                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7746                         break;
7747                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7748                         ret = flow_dv_validate_action_modify_ipv6_dscp
7749                                                                 (action_flags,
7750                                                                  actions,
7751                                                                  item_flags,
7752                                                                  error);
7753                         if (ret < 0)
7754                                 return ret;
7755                         /* Count all modify-header actions as one action. */
7756                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7757                                 ++actions_n;
7758                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7759                                 modify_after_mirror = 1;
7760                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7761                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7762                         break;
7763                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7764                         ret = flow_dv_validate_action_sample(&action_flags,
7765                                                              actions, dev,
7766                                                              attr, item_flags,
7767                                                              rss, &sample_rss,
7768                                                              &sample_count,
7769                                                              &fdb_mirror_limit,
7770                                                              error);
7771                         if (ret < 0)
7772                                 return ret;
7773                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7774                         ++actions_n;
7775                         break;
7776                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7777                         ret = flow_dv_validate_action_modify_field(dev,
7778                                                                    action_flags,
7779                                                                    actions,
7780                                                                    attr,
7781                                                                    error);
7782                         if (ret < 0)
7783                                 return ret;
7784                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7785                                 modify_after_mirror = 1;
7786                         /* Count all modify-header actions as one action. */
7787                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7788                                 ++actions_n;
7789                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7790                         rw_act_num += ret;
7791                         break;
7792                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7793                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7794                                                              item_flags, attr,
7795                                                              error);
7796                         if (ret < 0)
7797                                 return ret;
7798                         action_flags |= MLX5_FLOW_ACTION_CT;
7799                         break;
7800                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7801                         /* Tunnel offload action was processed before;
7802                          * list it here as a supported type.
7803                          */
7804                         break;
7805                 default:
7806                         return rte_flow_error_set(error, ENOTSUP,
7807                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7808                                                   actions,
7809                                                   "action not supported");
7810                 }
7811         }
7812         /*
7813          * Validate actions in tunnel offload flow rules:
7814          * - Explicit decap action is prohibited by the tunnel offload API.
7815          * - Drop action in a tunnel steer rule is prohibited by the API.
7816          * - Application cannot use MARK action because its value can mask
7817          *   the tunnel default-miss notification.
7818          * - JUMP in a tunnel match rule is not supported by the current
7819          *   PMD implementation.
7820          * - TAG & META are reserved for future use.
7821          */
7822         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7823                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7824                                             MLX5_FLOW_ACTION_MARK     |
7825                                             MLX5_FLOW_ACTION_SET_TAG  |
7826                                             MLX5_FLOW_ACTION_SET_META |
7827                                             MLX5_FLOW_ACTION_DROP;
7828
7829                 if (action_flags & bad_actions_mask)
7830                         return rte_flow_error_set
7831                                         (error, EINVAL,
7832                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7833                                         "Invalid RTE action in tunnel "
7834                                         "set decap rule");
7835                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7836                         return rte_flow_error_set
7837                                         (error, EINVAL,
7838                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7839                                         "tunnel set decap rule must terminate "
7840                                         "with JUMP");
7841                 if (!attr->ingress)
7842                         return rte_flow_error_set
7843                                         (error, EINVAL,
7844                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7845                                         "tunnel flows for ingress traffic only");
7846         }
7847         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7848                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7849                                             MLX5_FLOW_ACTION_MARK    |
7850                                             MLX5_FLOW_ACTION_SET_TAG |
7851                                             MLX5_FLOW_ACTION_SET_META;
7852
7853                 if (action_flags & bad_actions_mask)
7854                         return rte_flow_error_set
7855                                         (error, EINVAL,
7856                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7857                                         "Invalid RTE action in tunnel "
7858                                         "set match rule");
7859         }
7860         /*
7861          * Validate the drop action mutual exclusion with other actions.
7862          * Drop action is mutually-exclusive with any other action, except for
7863          * Count action.
7864          * Drop action compatibility with tunnel offload was already validated.
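         * For example, { DROP, COUNT, END } is accepted, while
         * { DROP, MARK, END } is rejected.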
7865          */
7866         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
7867                             MLX5_FLOW_ACTION_TUNNEL_MATCH));
7868         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7869             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7870                 return rte_flow_error_set(error, EINVAL,
7871                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7872                                           "Drop action is mutually-exclusive "
7873                                           "with any other action, except for "
7874                                           "Count action");
7875         /* E-Switch flows have a few restrictions on items and actions. */
7876         if (attr->transfer) {
7877                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7878                     action_flags & MLX5_FLOW_ACTION_FLAG)
7879                         return rte_flow_error_set(error, ENOTSUP,
7880                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7881                                                   NULL,
7882                                                   "unsupported action FLAG");
7883                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7884                     action_flags & MLX5_FLOW_ACTION_MARK)
7885                         return rte_flow_error_set(error, ENOTSUP,
7886                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7887                                                   NULL,
7888                                                   "unsupported action MARK");
7889                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7890                         return rte_flow_error_set(error, ENOTSUP,
7891                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7892                                                   NULL,
7893                                                   "unsupported action QUEUE");
7894                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7895                         return rte_flow_error_set(error, ENOTSUP,
7896                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7897                                                   NULL,
7898                                                   "unsupported action RSS");
7899                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7900                         return rte_flow_error_set(error, EINVAL,
7901                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7902                                                   actions,
7903                                                   "no fate action is found");
7904         } else {
7905                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7906                         return rte_flow_error_set(error, EINVAL,
7907                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7908                                                   actions,
7909                                                   "no fate action is found");
7910         }
7911         /*
7912          * Continue validation for Xcap and VLAN actions.
7913          * If hairpin works in explicit TX rule mode, there is no action
7914          * splitting and the validation of a hairpin ingress flow is the
7915          * same as for other standard flows.
7916          */
7917         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7918                              MLX5_FLOW_VLAN_ACTIONS)) &&
7919             (queue_index == 0xFFFF ||
7920              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7921              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7922              conf->tx_explicit != 0))) {
7923                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7924                     MLX5_FLOW_XCAP_ACTIONS)
7925                         return rte_flow_error_set(error, ENOTSUP,
7926                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7927                                                   NULL, "encap and decap "
7928                                                   "combination is not supported");
7929                 if (!attr->transfer && attr->ingress) {
7930                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7931                                 return rte_flow_error_set
7932                                                 (error, ENOTSUP,
7933                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7934                                                  NULL, "encap is not supported"
7935                                                  " for ingress traffic");
7936                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7937                                 return rte_flow_error_set
7938                                                 (error, ENOTSUP,
7939                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7940                                                  NULL, "push VLAN action not "
7941                                                  "supported for ingress");
7942                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7943                                         MLX5_FLOW_VLAN_ACTIONS)
7944                                 return rte_flow_error_set
7945                                                 (error, ENOTSUP,
7946                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7947                                                  NULL, "no support for "
7948                                                  "multiple VLAN actions");
7949                 }
7950         }
7951         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7952                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7953                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7954                         attr->ingress)
7955                         return rte_flow_error_set
7956                                 (error, ENOTSUP,
7957                                 RTE_FLOW_ERROR_TYPE_ACTION,
7958                                 NULL, "fate action not supported for "
7959                                 "meter with policy");
7960                 if (attr->egress) {
7961                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7962                                 return rte_flow_error_set
7963                                         (error, ENOTSUP,
7964                                         RTE_FLOW_ERROR_TYPE_ACTION,
7965                                         NULL, "modify header action in egress "
7966                                         "cannot be done before meter action");
7967                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7968                                 return rte_flow_error_set
7969                                         (error, ENOTSUP,
7970                                         RTE_FLOW_ERROR_TYPE_ACTION,
7971                                         NULL, "encap action in egress "
7972                                         "cannot be done before meter action");
7973                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7974                                 return rte_flow_error_set
7975                                         (error, ENOTSUP,
7976                                         RTE_FLOW_ERROR_TYPE_ACTION,
7977                                         NULL, "push vlan action in egress "
7978                                         "cannot be done before meter action");
7979                 }
7980         }
7981         /*
7982          * A hairpin flow adds one more TAG action in TX implicit mode.
7983          * In TX explicit mode, there is no hairpin flow ID.
7984          */
7985         if (hairpin > 0)
7986                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7987         /* Extra metadata enabled: one more TAG action will be added. */
7988         if (dev_conf->dv_flow_en &&
7989             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7990             mlx5_flow_ext_mreg_supported(dev))
7991                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7992         if (rw_act_num > flow_dv_modify_hdr_action_max(dev, is_root)) {
7994                 return rte_flow_error_set(error, ENOTSUP,
7995                                           RTE_FLOW_ERROR_TYPE_ACTION,
7996                                           NULL, "too many header modify"
7997                                           " actions to support");
7998         }
7999         /* E-Switch egress mirror-and-modify flows have a limitation on CX5. */
8000         if (fdb_mirror_limit && modify_after_mirror)
8001                 return rte_flow_error_set(error, EINVAL,
8002                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8003                                 "sample before modify action is not supported");
8004         return 0;
8005 }
8006
8007 /**
8008  * Internal preparation function. Allocates the DV flow object;
8009  * its size is constant.
8010  *
8011  * @param[in] dev
8012  *   Pointer to the rte_eth_dev structure.
8013  * @param[in] attr
8014  *   Pointer to the flow attributes.
8015  * @param[in] items
8016  *   Pointer to the list of items.
8017  * @param[in] actions
8018  *   Pointer to the list of actions.
8019  * @param[out] error
8020  *   Pointer to the error structure.
8021  *
8022  * @return
8023  *   Pointer to mlx5_flow object on success,
8024  *   otherwise NULL and rte_errno is set.
8025  */
8026 static struct mlx5_flow *
8027 flow_dv_prepare(struct rte_eth_dev *dev,
8028                 const struct rte_flow_attr *attr __rte_unused,
8029                 const struct rte_flow_item items[] __rte_unused,
8030                 const struct rte_flow_action actions[] __rte_unused,
8031                 struct rte_flow_error *error)
8032 {
8033         uint32_t handle_idx = 0;
8034         struct mlx5_flow *dev_flow;
8035         struct mlx5_flow_handle *dev_handle;
8036         struct mlx5_priv *priv = dev->data->dev_private;
8037         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8038
8039         MLX5_ASSERT(wks);
8040         wks->skip_matcher_reg = 0;
8041         wks->policy = NULL;
8042         wks->final_policy = NULL;
8043         /* Guard against overflowing the per-thread flow array. */
8044         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8045                 rte_flow_error_set(error, ENOSPC,
8046                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8047                                    "no free temporary device flow");
8048                 return NULL;
8049         }
8050         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8051                                    &handle_idx);
8052         if (!dev_handle) {
8053                 rte_flow_error_set(error, ENOMEM,
8054                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8055                                    "not enough memory to create flow handle");
8056                 return NULL;
8057         }
8058         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8059         dev_flow = &wks->flows[wks->flow_idx++];
8060         memset(dev_flow, 0, sizeof(*dev_flow));
8061         dev_flow->handle = dev_handle;
8062         dev_flow->handle_idx = handle_idx;
8063         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8064         dev_flow->ingress = attr->ingress;
8065         dev_flow->dv.transfer = attr->transfer;
8066         return dev_flow;
8067 }
8068
8069 #ifdef RTE_LIBRTE_MLX5_DEBUG
8070 /**
8071  * Sanity check for match mask and value, similar to check_valid_spec() in
8072  * the kernel driver. If an unmasked bit is present in the value, it fails.
8073  *
8074  * @param match_mask
8075  *   pointer to match mask buffer.
8076  * @param match_value
8077  *   pointer to match value buffer.
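 *
 * For example, a mask byte of 0x0f with a value byte of 0x1f is invalid:
 * value bit 4 is set while the corresponding mask bit is clear.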
8078  *
8079  * @return
8080  *   0 if valid, -EINVAL otherwise.
8081  */
8082 static int
8083 flow_dv_check_valid_spec(void *match_mask, void *match_value)
8084 {
8085         uint8_t *m = match_mask;
8086         uint8_t *v = match_value;
8087         unsigned int i;
8088
8089         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8090                 if (v[i] & ~m[i]) {
8091                         DRV_LOG(ERR,
8092                                 "match_value differs from match_criteria"
8093                                 " %p[%u] != %p[%u]",
8094                                 match_value, i, match_mask, i);
8095                         return -EINVAL;
8096                 }
8097         }
8098         return 0;
8099 }
8100 #endif
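
/*
 * Editor's sketch (hypothetical helper, not part of the driver): the
 * invariant enforced by flow_dv_check_valid_spec() above, reduced to a
 * single byte - a value bit set outside its mask can never match, so
 * such a spec is rejected.
 */
static inline int
mlx5_example_spec_byte_valid(uint8_t mask, uint8_t value)
{
        /* Valid only when every set value bit is covered by the mask. */
        return (value & ~mask) == 0;
}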
8101
8102 /**
8103  * Add match of ip_version.
8104  *
8105  * @param[in] group
8106  *   Flow group.
8107  * @param[in] headers_v
8108  *   Values header pointer.
8109  * @param[in] headers_m
8110  *   Masks header pointer.
8111  * @param[in] ip_version
8112  *   The IP version to set.
8113  */
8114 static inline void
8115 flow_dv_set_match_ip_version(uint32_t group,
8116                              void *headers_v,
8117                              void *headers_m,
8118                              uint8_t ip_version)
8119 {
8120         if (group == 0)
8121                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8122         else
8123                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8124                          ip_version);
8125         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8126         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8127         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8128 }
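
/*
 * Editor's usage sketch (hypothetical buffers): on the root table
 * (group 0) the device requires the full 4-bit ip_version mask, so
 *
 *      flow_dv_set_match_ip_version(0, headers_v, headers_m, 4);
 *
 * programs mask 0xf with value 4, while on any other group the mask
 * equals the value itself. The ethertype is cleared in both mask and
 * value because the ip_version match supersedes it.
 */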
8129
8130 /**
8131  * Add Ethernet item to matcher and to the value.
8132  *
8133  * @param[in, out] matcher
8134  *   Flow matcher.
8135  * @param[in, out] key
8136  *   Flow matcher value.
8137  * @param[in] item
8138  *   Flow pattern to translate.
8139  * @param[in] inner
8140  *   Item is inner pattern.
8141  * @param[in] group
8142  *   The group to insert the rule.
8141  */
8142 static void
8143 flow_dv_translate_item_eth(void *matcher, void *key,
8144                            const struct rte_flow_item *item, int inner,
8145                            uint32_t group)
8146 {
8147         const struct rte_flow_item_eth *eth_m = item->mask;
8148         const struct rte_flow_item_eth *eth_v = item->spec;
8149         const struct rte_flow_item_eth nic_mask = {
8150                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8151                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8152                 .type = RTE_BE16(0xffff),
8153                 .has_vlan = 0,
8154         };
8155         void *hdrs_m;
8156         void *hdrs_v;
8157         char *l24_v;
8158         unsigned int i;
8159
8160         if (!eth_v)
8161                 return;
8162         if (!eth_m)
8163                 eth_m = &nic_mask;
8164         if (inner) {
8165                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8166                                          inner_headers);
8167                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8168         } else {
8169                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8170                                          outer_headers);
8171                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8172         }
8173         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8174                &eth_m->dst, sizeof(eth_m->dst));
8175         /* The value must be in the range of the mask. */
8176         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8177         for (i = 0; i < sizeof(eth_m->dst); ++i)
8178                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8179         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8180                &eth_m->src, sizeof(eth_m->src));
8181         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8182         /* The value must be in the range of the mask. */
8183         for (i = 0; i < sizeof(eth_m->src); ++i)
8184                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8185         /*
8186          * HW supports match on one Ethertype, the Ethertype following the last
8187          * VLAN tag of the packet (see PRM).
8188          * Set match on ethertype only if ETH header is not followed by VLAN.
8189          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8190          * ethertype, and use ip_version field instead.
8191          * eCPRI over Ether layer will use type value 0xAEFE.
8192          */
8193         if (eth_m->type == 0xFFFF) {
8194                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8195                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8196                 switch (eth_v->type) {
8197                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8198                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8199                         return;
8200                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8201                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8202                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8203                         return;
8204                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8205                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8206                         return;
8207                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8208                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8209                         return;
8210                 default:
8211                         break;
8212                 }
8213         }
8214         if (eth_m->has_vlan) {
8215                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8216                 if (eth_v->has_vlan) {
8217                         /*
8218                          * Here, when also has_more_vlan field in VLAN item is
8219                          * not set, only single-tagged packets will be matched.
8220                          */
8221                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8222                         return;
8223                 }
8224         }
8225         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8226                  rte_be_to_cpu_16(eth_m->type));
8227         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8228         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8229 }
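
/*
 * Editor's sketch (hypothetical items, not used by the driver): an ETH
 * item whose type is fully masked as IPv4 takes the ip_version shortcut
 * above instead of programming an ethertype match.
 */
static const struct rte_flow_item_eth
mlx5_example_eth_ipv4_spec __rte_unused = {
        .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const struct rte_flow_item_eth
mlx5_example_eth_ipv4_mask __rte_unused = {
        .type = RTE_BE16(0xffff),
};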
8230
8231 /**
8232  * Add VLAN item to matcher and to the value.
8233  *
8234  * @param[in, out] dev_flow
8235  *   Flow descriptor.
8236  * @param[in, out] matcher
8237  *   Flow matcher.
8238  * @param[in, out] key
8239  *   Flow matcher value.
8240  * @param[in] item
8241  *   Flow pattern to translate.
8242  * @param[in] inner
8243  *   Item is inner pattern.
8244  * @param[in] group
8245  *   The group to insert the rule.
8244  */
8245 static void
8246 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8247                             void *matcher, void *key,
8248                             const struct rte_flow_item *item,
8249                             int inner, uint32_t group)
8250 {
8251         const struct rte_flow_item_vlan *vlan_m = item->mask;
8252         const struct rte_flow_item_vlan *vlan_v = item->spec;
8253         void *hdrs_m;
8254         void *hdrs_v;
8255         uint16_t tci_m;
8256         uint16_t tci_v;
8257
8258         if (inner) {
8259                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8260                                          inner_headers);
8261                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8262         } else {
8263                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8264                                          outer_headers);
8265                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8266                 /*
8267                  * This is a workaround; masks are not supported here
8268                  * and have been pre-validated.
8269                  */
8270                 if (vlan_v)
8271                         dev_flow->handle->vf_vlan.tag =
8272                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8273         }
8274         /*
8275          * When VLAN item exists in flow, mark packet as tagged,
8276          * even if TCI is not specified.
8277          */
8278         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8279                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8280                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8281         }
8282         if (!vlan_v)
8283                 return;
8284         if (!vlan_m)
8285                 vlan_m = &rte_flow_item_vlan_mask;
8286         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8287         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8288         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8289         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8290         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8291         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8292         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8293         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8294         /*
8295          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8296          * ethertype, and use ip_version field instead.
8297          */
8298         if (vlan_m->inner_type == 0xFFFF) {
8299                 switch (vlan_v->inner_type) {
8300                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8301                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8302                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8303                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8304                         return;
8305                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8306                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8307                         return;
8308                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8309                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8310                         return;
8311                 default:
8312                         break;
8313                 }
8314         }
8315         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8316                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8317                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8318                 /* Only one vlan_tag bit can be set. */
8319                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8320                 return;
8321         }
8322         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8323                  rte_be_to_cpu_16(vlan_m->inner_type));
8324         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8325                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8326 }
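
/*
 * Editor's sketch (hypothetical helper): the TCI split performed above.
 * MLX5_SET() truncates each argument to its field width, so a plain
 * shift suffices; e.g. tci 0xe00a yields prio 7, cfi 0 and vid 10.
 */
static inline void
mlx5_example_vlan_tci_split(uint16_t tci, uint8_t *prio, uint8_t *cfi,
                            uint16_t *vid)
{
        *prio = (tci >> 13) & 0x7;              /* PCP, bits 15..13. */
        *cfi = (tci >> 12) & 0x1;               /* DEI, bit 12. */
        *vid = tci & MLX5DV_FLOW_VLAN_VID_MASK; /* VID, bits 11..0. */
}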
8327
8328 /**
8329  * Add IPV4 item to matcher and to the value.
8330  *
8331  * @param[in, out] matcher
8332  *   Flow matcher.
8333  * @param[in, out] key
8334  *   Flow matcher value.
8335  * @param[in] item
8336  *   Flow pattern to translate.
8337  * @param[in] inner
8338  *   Item is inner pattern.
8339  * @param[in] group
8340  *   The group to insert the rule.
8341  */
8342 static void
8343 flow_dv_translate_item_ipv4(void *matcher, void *key,
8344                             const struct rte_flow_item *item,
8345                             int inner, uint32_t group)
8346 {
8347         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8348         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8349         const struct rte_flow_item_ipv4 nic_mask = {
8350                 .hdr = {
8351                         .src_addr = RTE_BE32(0xffffffff),
8352                         .dst_addr = RTE_BE32(0xffffffff),
8353                         .type_of_service = 0xff,
8354                         .next_proto_id = 0xff,
8355                         .time_to_live = 0xff,
8356                 },
8357         };
8358         void *headers_m;
8359         void *headers_v;
8360         char *l24_m;
8361         char *l24_v;
8362         uint8_t tos, ihl_m, ihl_v;
8363
8364         if (inner) {
8365                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8366                                          inner_headers);
8367                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8368         } else {
8369                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8370                                          outer_headers);
8371                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8372         }
8373         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8374         if (!ipv4_v)
8375                 return;
8376         if (!ipv4_m)
8377                 ipv4_m = &nic_mask;
8378         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8379                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8380         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8381                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8382         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8383         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8384         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8385                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8386         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8387                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8388         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8389         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8390         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8391         ihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8392         ihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;
8393         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);
8394         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);
8395         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8396                  ipv4_m->hdr.type_of_service);
8397         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8398         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8399                  ipv4_m->hdr.type_of_service >> 2);
8400         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8401         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8402                  ipv4_m->hdr.next_proto_id);
8403         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8404                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8405         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8406                  ipv4_m->hdr.time_to_live);
8407         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8408                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8409         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8410                  !!(ipv4_m->hdr.fragment_offset));
8411         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8412                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8413 }
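
/*
 * Editor's sketch (hypothetical helper): the TOS split used above. The
 * 2-bit ip_ecn field keeps the low bits after MLX5_SET() truncation;
 * e.g. type_of_service 0xb8 (EF) gives dscp 46 and ecn 0.
 */
static inline void
mlx5_example_ipv4_tos_split(uint8_t tos, uint8_t *dscp, uint8_t *ecn)
{
        *dscp = tos >> 2;       /* Upper six bits. */
        *ecn = tos & 0x3;       /* Lower two bits. */
}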
8414
8415 /**
8416  * Add IPV6 item to matcher and to the value.
8417  *
8418  * @param[in, out] matcher
8419  *   Flow matcher.
8420  * @param[in, out] key
8421  *   Flow matcher value.
8422  * @param[in] item
8423  *   Flow pattern to translate.
8424  * @param[in] inner
8425  *   Item is inner pattern.
8426  * @param[in] group
8427  *   The group to insert the rule.
8428  */
8429 static void
8430 flow_dv_translate_item_ipv6(void *matcher, void *key,
8431                             const struct rte_flow_item *item,
8432                             int inner, uint32_t group)
8433 {
8434         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8435         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8436         const struct rte_flow_item_ipv6 nic_mask = {
8437                 .hdr = {
8438                         .src_addr =
8439                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8440                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8441                         .dst_addr =
8442                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8443                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8444                         .vtc_flow = RTE_BE32(0xffffffff),
8445                         .proto = 0xff,
8446                         .hop_limits = 0xff,
8447                 },
8448         };
8449         void *headers_m;
8450         void *headers_v;
8451         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8452         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8453         char *l24_m;
8454         char *l24_v;
8455         uint32_t vtc_m;
8456         uint32_t vtc_v;
8457         int i;
8458         int size;
8459
8460         if (inner) {
8461                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8462                                          inner_headers);
8463                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8464         } else {
8465                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8466                                          outer_headers);
8467                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8468         }
8469         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8470         if (!ipv6_v)
8471                 return;
8472         if (!ipv6_m)
8473                 ipv6_m = &nic_mask;
8474         size = sizeof(ipv6_m->hdr.dst_addr);
8475         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8476                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8477         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8478                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8479         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8480         for (i = 0; i < size; ++i)
8481                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8482         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8483                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8484         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8485                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8486         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8487         for (i = 0; i < size; ++i)
8488                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8489         /* TOS. */
8490         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8491         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8492         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8493         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8494         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8495         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8496         /* Label. */
8497         if (inner) {
8498                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8499                          vtc_m);
8500                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8501                          vtc_v);
8502         } else {
8503                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8504                          vtc_m);
8505                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8506                          vtc_v);
8507         }
8508         /* Protocol. */
8509         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8510                  ipv6_m->hdr.proto);
8511         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8512                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8513         /* Hop limit. */
8514         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8515                  ipv6_m->hdr.hop_limits);
8516         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8517                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8518         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8519                  !!(ipv6_m->has_frag_ext));
8520         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8521                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8522 }
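
/*
 * Editor's sketch (hypothetical helper): the vtc_flow split used above.
 * The word is version(4) | traffic class(8) | flow label(20); e.g.
 * vtc_flow 0x6b800000 yields dscp 46, ecn 0 and an empty flow label.
 */
static inline void
mlx5_example_ipv6_vtc_split(uint32_t vtc, uint8_t *dscp, uint8_t *ecn,
                            uint32_t *flow_label)
{
        *dscp = (vtc >> 22) & 0x3f;     /* Upper six TC bits. */
        *ecn = (vtc >> 20) & 0x3;       /* Lower two TC bits. */
        *flow_label = vtc & 0xfffff;    /* 20-bit flow label. */
}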
8523
8524 /**
8525  * Add IPV6 fragment extension item to matcher and to the value.
8526  *
8527  * @param[in, out] matcher
8528  *   Flow matcher.
8529  * @param[in, out] key
8530  *   Flow matcher value.
8531  * @param[in] item
8532  *   Flow pattern to translate.
8533  * @param[in] inner
8534  *   Item is inner pattern.
8535  */
8536 static void
8537 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8538                                      const struct rte_flow_item *item,
8539                                      int inner)
8540 {
8541         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8542         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8543         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8544                 .hdr = {
8545                         .next_header = 0xff,
8546                         .frag_data = RTE_BE16(0xffff),
8547                 },
8548         };
8549         void *headers_m;
8550         void *headers_v;
8551
8552         if (inner) {
8553                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8554                                          inner_headers);
8555                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8556         } else {
8557                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8558                                          outer_headers);
8559                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8560         }
8561         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8562         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8563         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8564         if (!ipv6_frag_ext_v)
8565                 return;
8566         if (!ipv6_frag_ext_m)
8567                 ipv6_frag_ext_m = &nic_mask;
8568         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8569                  ipv6_frag_ext_m->hdr.next_header);
8570         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8571                  ipv6_frag_ext_v->hdr.next_header &
8572                  ipv6_frag_ext_m->hdr.next_header);
8573 }
8574
8575 /**
8576  * Add TCP item to matcher and to the value.
8577  *
8578  * @param[in, out] matcher
8579  *   Flow matcher.
8580  * @param[in, out] key
8581  *   Flow matcher value.
8582  * @param[in] item
8583  *   Flow pattern to translate.
8584  * @param[in] inner
8585  *   Item is inner pattern.
8586  */
8587 static void
8588 flow_dv_translate_item_tcp(void *matcher, void *key,
8589                            const struct rte_flow_item *item,
8590                            int inner)
8591 {
8592         const struct rte_flow_item_tcp *tcp_m = item->mask;
8593         const struct rte_flow_item_tcp *tcp_v = item->spec;
8594         void *headers_m;
8595         void *headers_v;
8596
8597         if (inner) {
8598                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8599                                          inner_headers);
8600                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8601         } else {
8602                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8603                                          outer_headers);
8604                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8605         }
8606         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8607         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8608         if (!tcp_v)
8609                 return;
8610         if (!tcp_m)
8611                 tcp_m = &rte_flow_item_tcp_mask;
8612         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8613                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8614         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8615                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8616         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8617                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8618         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8619                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8620         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8621                  tcp_m->hdr.tcp_flags);
8622         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8623                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8624 }
8625
8626 /**
8627  * Add UDP item to matcher and to the value.
8628  *
8629  * @param[in, out] matcher
8630  *   Flow matcher.
8631  * @param[in, out] key
8632  *   Flow matcher value.
8633  * @param[in] item
8634  *   Flow pattern to translate.
8635  * @param[in] inner
8636  *   Item is inner pattern.
8637  */
8638 static void
8639 flow_dv_translate_item_udp(void *matcher, void *key,
8640                            const struct rte_flow_item *item,
8641                            int inner)
8642 {
8643         const struct rte_flow_item_udp *udp_m = item->mask;
8644         const struct rte_flow_item_udp *udp_v = item->spec;
8645         void *headers_m;
8646         void *headers_v;
8647
8648         if (inner) {
8649                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8650                                          inner_headers);
8651                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8652         } else {
8653                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8654                                          outer_headers);
8655                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8656         }
8657         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8658         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8659         if (!udp_v)
8660                 return;
8661         if (!udp_m)
8662                 udp_m = &rte_flow_item_udp_mask;
8663         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8664                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8665         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8666                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8667         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8668                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8669         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8670                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8671 }
8672
8673 /**
8674  * Add GRE optional Key item to matcher and to the value.
8675  *
8676  * @param[in, out] matcher
8677  *   Flow matcher.
8678  * @param[in, out] key
8679  *   Flow matcher value.
8680  * @param[in] item
8681  *   Flow pattern to translate.
8684  */
8685 static void
8686 flow_dv_translate_item_gre_key(void *matcher, void *key,
8687                                const struct rte_flow_item *item)
8688 {
8689         const rte_be32_t *key_m = item->mask;
8690         const rte_be32_t *key_v = item->spec;
8691         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8692         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8693         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8694
8695         /* GRE K bit must be on and should already be validated */
8696         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8697         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8698         if (!key_v)
8699                 return;
8700         if (!key_m)
8701                 key_m = &gre_key_default_mask;
8702         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8703                  rte_be_to_cpu_32(*key_m) >> 8);
8704         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8705                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8706         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8707                  rte_be_to_cpu_32(*key_m) & 0xFF);
8708         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8709                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8710 }
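
/*
 * Editor's sketch (hypothetical helper): the PRM stores the 32-bit GRE
 * key as a 24-bit high part plus an 8-bit low part, exactly the split
 * done above; e.g. key 0xaabbccdd gives gre_key_h 0xaabbcc and
 * gre_key_l 0xdd.
 */
static inline void
mlx5_example_gre_key_split(uint32_t key, uint32_t *key_h, uint8_t *key_l)
{
        *key_h = key >> 8;      /* Upper 24 bits. */
        *key_l = key & 0xff;    /* Lower 8 bits. */
}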
8711
8712 /**
8713  * Add GRE item to matcher and to the value.
8714  *
8715  * @param[in, out] matcher
8716  *   Flow matcher.
8717  * @param[in, out] key
8718  *   Flow matcher value.
8719  * @param[in] item
8720  *   Flow pattern to translate.
8721  * @param[in] inner
8722  *   Item is inner pattern.
8723  */
8724 static void
8725 flow_dv_translate_item_gre(void *matcher, void *key,
8726                            const struct rte_flow_item *item,
8727                            int inner)
8728 {
8729         const struct rte_flow_item_gre *gre_m = item->mask;
8730         const struct rte_flow_item_gre *gre_v = item->spec;
8731         void *headers_m;
8732         void *headers_v;
8733         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8734         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8735         struct {
8736                 union {
8737                         __extension__
8738                         struct {
8739                                 uint16_t version:3;
8740                                 uint16_t rsvd0:9;
8741                                 uint16_t s_present:1;
8742                                 uint16_t k_present:1;
8743                                 uint16_t rsvd_bit1:1;
8744                                 uint16_t c_present:1;
8745                         };
8746                         uint16_t value;
8747                 };
8748         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8749
8750         if (inner) {
8751                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8752                                          inner_headers);
8753                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8754         } else {
8755                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8756                                          outer_headers);
8757                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8758         }
8759         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8760         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8761         if (!gre_v)
8762                 return;
8763         if (!gre_m)
8764                 gre_m = &rte_flow_item_gre_mask;
8765         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8766                  rte_be_to_cpu_16(gre_m->protocol));
8767         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8768                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8769         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8770         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8771         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8772                  gre_crks_rsvd0_ver_m.c_present);
8773         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8774                  gre_crks_rsvd0_ver_v.c_present &
8775                  gre_crks_rsvd0_ver_m.c_present);
8776         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8777                  gre_crks_rsvd0_ver_m.k_present);
8778         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8779                  gre_crks_rsvd0_ver_v.k_present &
8780                  gre_crks_rsvd0_ver_m.k_present);
8781         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8782                  gre_crks_rsvd0_ver_m.s_present);
8783         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8784                  gre_crks_rsvd0_ver_v.s_present &
8785                  gre_crks_rsvd0_ver_m.s_present);
8786 }
8787
8788 /**
8789  * Add NVGRE item to matcher and to the value.
8790  *
8791  * @param[in, out] matcher
8792  *   Flow matcher.
8793  * @param[in, out] key
8794  *   Flow matcher value.
8795  * @param[in] item
8796  *   Flow pattern to translate.
8797  * @param[in] inner
8798  *   Item is inner pattern.
8799  */
8800 static void
8801 flow_dv_translate_item_nvgre(void *matcher, void *key,
8802                              const struct rte_flow_item *item,
8803                              int inner)
8804 {
8805         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8806         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8807         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8808         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8809         const char *tni_flow_id_m;
8810         const char *tni_flow_id_v;
8811         char *gre_key_m;
8812         char *gre_key_v;
8813         int size;
8814         int i;
8815
8816         /* For NVGRE, GRE header fields must be set with defined values. */
8817         const struct rte_flow_item_gre gre_spec = {
8818                 .c_rsvd0_ver = RTE_BE16(0x2000),
8819                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8820         };
8821         const struct rte_flow_item_gre gre_mask = {
8822                 .c_rsvd0_ver = RTE_BE16(0xB000),
8823                 .protocol = RTE_BE16(UINT16_MAX),
8824         };
8825         const struct rte_flow_item gre_item = {
8826                 .spec = &gre_spec,
8827                 .mask = &gre_mask,
8828                 .last = NULL,
8829         };
8830         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8831         if (!nvgre_v)
8832                 return;
8833         if (!nvgre_m)
8834                 nvgre_m = &rte_flow_item_nvgre_mask;
8835         tni_flow_id_m = (const char *)nvgre_m->tni;
8836         tni_flow_id_v = (const char *)nvgre_v->tni;
8837         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8838         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8839         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8840         memcpy(gre_key_m, tni_flow_id_m, size);
8841         for (i = 0; i < size; ++i)
8842                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8843 }
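
/*
 * Editor's note with a hypothetical helper: NVGRE is matched above as
 * plain GRE with a pinned header - spec 0x2000 / mask 0xb000 require
 * K = 1 and C = S = 0 - while the 24-bit TNI plus the 8-bit flow_id
 * overlay the 32-bit GRE key field, as sketched below.
 */
static inline uint32_t
mlx5_example_nvgre_key(const uint8_t tni[3], uint8_t flow_id)
{
        /* TNI and flow_id occupy the GRE key bytes in network order. */
        return ((uint32_t)tni[0] << 24) | ((uint32_t)tni[1] << 16) |
               ((uint32_t)tni[2] << 8) | flow_id;
}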
8844
8845 /**
8846  * Add VXLAN item to matcher and to the value.
8847  *
8848  * @param[in] dev
8849  *   Pointer to the Ethernet device structure.
8850  * @param[in] attr
8851  *   Flow rule attributes.
8852  * @param[in, out] matcher
8853  *   Flow matcher.
8854  * @param[in, out] key
8855  *   Flow matcher value.
8856  * @param[in] item
8857  *   Flow pattern to translate.
8858  * @param[in] inner
8859  *   Item is inner pattern.
8860  */
8861 static void
8862 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8863                              const struct rte_flow_attr *attr,
8864                              void *matcher, void *key,
8865                              const struct rte_flow_item *item,
8866                              int inner)
8867 {
8868         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8869         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8870         void *headers_m;
8871         void *headers_v;
8872         void *misc5_m;
8873         void *misc5_v;
8874         uint32_t *tunnel_header_v;
8875         uint32_t *tunnel_header_m;
8876         uint16_t dport;
8877         struct mlx5_priv *priv = dev->data->dev_private;
8878         const struct rte_flow_item_vxlan nic_mask = {
8879                 .vni = "\xff\xff\xff",
8880                 .rsvd1 = 0xff,
8881         };
8882
8883         if (inner) {
8884                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8885                                          inner_headers);
8886                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8887         } else {
8888                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8889                                          outer_headers);
8890                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8891         }
8892         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8893                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8894         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8895                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8896                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8897         }
8898         dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
8899         if (!vxlan_v)
8900                 return;
8901         if (!vxlan_m) {
8902                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8903                     (attr->group && !priv->sh->misc5_cap))
8904                         vxlan_m = &rte_flow_item_vxlan_mask;
8905                 else
8906                         vxlan_m = &nic_mask;
8907         }
8908         if ((priv->sh->steering_format_version ==
8909             MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
8910             dport != MLX5_UDP_PORT_VXLAN) ||
8911             (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8912             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8913                 void *misc_m;
8914                 void *misc_v;
8915                 char *vni_m;
8916                 char *vni_v;
8917                 int size;
8918                 int i;

8919                 misc_m = MLX5_ADDR_OF(fte_match_param,
8920                                       matcher, misc_parameters);
8921                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8922                 size = sizeof(vxlan_m->vni);
8923                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8924                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8925                 memcpy(vni_m, vxlan_m->vni, size);
8926                 for (i = 0; i < size; ++i)
8927                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8928                 return;
8929         }
8930         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8931         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8932         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8933                                                    misc5_v,
8934                                                    tunnel_header_1);
8935         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8936                                                    misc5_m,
8937                                                    tunnel_header_1);
8938         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8939                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8940                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8941         if (*tunnel_header_v)
8942                 *tunnel_header_m = vxlan_m->vni[0] |
8943                         vxlan_m->vni[1] << 8 |
8944                         vxlan_m->vni[2] << 16;
8945         else
8946                 *tunnel_header_m = 0x0;
8947         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8948         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8949                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8950 }
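
/*
 * Editor's sketch (hypothetical helper): with misc5 support the VNI and
 * the reserved byte are packed above into tunnel_header_1; e.g. VNI
 * bytes {0x12, 0x34, 0x56} with rsvd1 0 give 0x00563412.
 */
static inline uint32_t
mlx5_example_vxlan_tunnel_header(const uint8_t vni[3], uint8_t rsvd1)
{
        return (uint32_t)vni[0] | (uint32_t)vni[1] << 8 |
               (uint32_t)vni[2] << 16 | (uint32_t)rsvd1 << 24;
}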
8951
8952 /**
8953  * Add VXLAN-GPE item to matcher and to the value.
8954  *
8955  * @param[in, out] matcher
8956  *   Flow matcher.
8957  * @param[in, out] key
8958  *   Flow matcher value.
8959  * @param[in] item
8960  *   Flow pattern to translate.
8961  * @param[in] inner
8962  *   Item is inner pattern.
8963  */
8965 static void
8966 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8967                                  const struct rte_flow_item *item, int inner)
8968 {
8969         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8970         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8971         void *headers_m;
8972         void *headers_v;
8973         void *misc_m =
8974                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8975         void *misc_v =
8976                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8977         char *vni_m;
8978         char *vni_v;
8979         uint16_t dport;
8980         int size;
8981         int i;
8982         uint8_t flags_m = 0xff;
8983         uint8_t flags_v = 0xc;
8984
8985         if (inner) {
8986                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8987                                          inner_headers);
8988                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8989         } else {
8990                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8991                                          outer_headers);
8992                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8993         }
8994         /* This function is called for VXLAN-GPE items only. */
8995         dport = MLX5_UDP_PORT_VXLAN_GPE;
8996         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8997                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8998                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8999         }
9000         if (!vxlan_v)
9001                 return;
9002         if (!vxlan_m)
9003                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
9004         size = sizeof(vxlan_m->vni);
9005         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
9006         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
9007         memcpy(vni_m, vxlan_m->vni, size);
9008         for (i = 0; i < size; ++i)
9009                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
9010         if (vxlan_m->flags) {
9011                 flags_m = vxlan_m->flags;
9012                 flags_v = vxlan_v->flags;
9013         }
9014         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
9015         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
9016         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
9017                  vxlan_m->protocol);
9018         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
9019                  vxlan_v->protocol);
9020 }
9021
9022 /**
9023  * Add Geneve item to matcher and to the value.
9024  *
9025  * @param[in, out] matcher
9026  *   Flow matcher.
9027  * @param[in, out] key
9028  *   Flow matcher value.
9029  * @param[in] item
9030  *   Flow pattern to translate.
9031  * @param[in] inner
9032  *   Item is inner pattern.
9033  */
9035 static void
9036 flow_dv_translate_item_geneve(void *matcher, void *key,
9037                               const struct rte_flow_item *item, int inner)
9038 {
9039         const struct rte_flow_item_geneve *geneve_m = item->mask;
9040         const struct rte_flow_item_geneve *geneve_v = item->spec;
9041         void *headers_m;
9042         void *headers_v;
9043         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9044         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9045         uint16_t dport;
9046         uint16_t gbhdr_m;
9047         uint16_t gbhdr_v;
9048         char *vni_m;
9049         char *vni_v;
9050         size_t size, i;
9051
9052         if (inner) {
9053                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9054                                          inner_headers);
9055                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9056         } else {
9057                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9058                                          outer_headers);
9059                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9060         }
9061         dport = MLX5_UDP_PORT_GENEVE;
9062         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9063                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9064                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9065         }
9066         if (!geneve_v)
9067                 return;
9068         if (!geneve_m)
9069                 geneve_m = &rte_flow_item_geneve_mask;
9070         size = sizeof(geneve_m->vni);
9071         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9072         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9073         memcpy(vni_m, geneve_m->vni, size);
9074         for (i = 0; i < size; ++i)
9075                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9076         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
9077                  rte_be_to_cpu_16(geneve_m->protocol));
9078         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9079                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
9080         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9081         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9082         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9083                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9084         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9085                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9086         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9087                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9088         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9089                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9090                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9091 }
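
/*
 * Editor's note, assuming the standard GENEVE base header layout of
 * ver(2) | opt_len(6) | O(1) | C(1) | rsvd(6): the driver extracts the
 * OAM bit and the option length above via MLX5_GENEVE_OAMF_VAL() and
 * MLX5_GENEVE_OPTLEN_VAL(); a hypothetical equivalent split is sketched
 * below.
 */
static inline void
mlx5_example_geneve_hdr_split(uint16_t ver_opt_len_o_c_rsvd0,
                              uint8_t *opt_len, uint8_t *oam)
{
        *opt_len = (ver_opt_len_o_c_rsvd0 >> 8) & 0x3f; /* Option words. */
        *oam = (ver_opt_len_o_c_rsvd0 >> 7) & 0x1;      /* OAM bit. */
}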
9092
9093 /**
9094  * Register a Geneve TLV option resource.
9095  *
9096  * @param[in, out] dev
9097  *   Pointer to rte_eth_dev structure.
9098  * @param[in] item
9099  *   Flow pattern holding the GENEVE option to register.
9100  * @param[out] error
9101  *   Pointer to error structure.
9102  *
9103  * @return
9104  *   0 on success, a negative value otherwise.
9105  */
9109 int
9110 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
9111                                              const struct rte_flow_item *item,
9112                                              struct rte_flow_error *error)
9113 {
9114         struct mlx5_priv *priv = dev->data->dev_private;
9115         struct mlx5_dev_ctx_shared *sh = priv->sh;
9116         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9117                         sh->geneve_tlv_option_resource;
9118         struct mlx5_devx_obj *obj;
9119         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9120         int ret = 0;
9121
9122         if (!geneve_opt_v)
9123                 return -1;
9124         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9125         if (geneve_opt_resource != NULL) {
9126                 if (geneve_opt_resource->option_class ==
9127                         geneve_opt_v->option_class &&
9128                         geneve_opt_resource->option_type ==
9129                         geneve_opt_v->option_type &&
9130                         geneve_opt_resource->length ==
9131                         geneve_opt_v->option_len) {
9132                         /* We already have GENEVE TLV option obj allocated. */
9133                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9134                                            __ATOMIC_RELAXED);
9135                 } else {
9136                         ret = rte_flow_error_set(error, ENOMEM,
9137                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9138                                 "Only one GENEVE TLV option supported");
9139                         goto exit;
9140                 }
9141         } else {
9142                 /* Create a GENEVE TLV object and resource. */
9143                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
9144                                 geneve_opt_v->option_class,
9145                                 geneve_opt_v->option_type,
9146                                 geneve_opt_v->option_len);
9147                 if (!obj) {
9148                         ret = rte_flow_error_set(error, ENODATA,
9149                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9150                                 "Failed to create GENEVE TLV Devx object");
9151                         goto exit;
9152                 }
9153                 sh->geneve_tlv_option_resource =
9154                                 mlx5_malloc(MLX5_MEM_ZERO,
9155                                                 sizeof(*geneve_opt_resource),
9156                                                 0, SOCKET_ID_ANY);
9157                 if (!sh->geneve_tlv_option_resource) {
9158                         claim_zero(mlx5_devx_cmd_destroy(obj));
9159                         ret = rte_flow_error_set(error, ENOMEM,
9160                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9161                                 "GENEVE TLV object memory allocation failed");
9162                         goto exit;
9163                 }
9164                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9165                 geneve_opt_resource->obj = obj;
9166                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9167                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9168                 geneve_opt_resource->length = geneve_opt_v->option_len;
9169                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9170                                 __ATOMIC_RELAXED);
9171         }
9172 exit:
9173         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9174         return ret;
9175 }
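
/*
 * Editor's note: a single GENEVE TLV option object exists per shared
 * device context, so the function above either takes a reference on a
 * resource with matching identity or creates the one and only object.
 * A minimal sketch of that identity check, with a hypothetical name:
 */
static inline int
mlx5_example_geneve_opt_matches(
        const struct mlx5_geneve_tlv_option_resource *res,
        const struct rte_flow_item_geneve_opt *opt)
{
        /* Class, type and length must all match to share the object. */
        return res->option_class == opt->option_class &&
               res->option_type == opt->option_type &&
               res->length == opt->option_len;
}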
9176
9177 /**
9178  * Add Geneve TLV option item to matcher.
9179  *
9180  * @param[in, out] dev
9181  *   Pointer to rte_eth_dev structure.
9182  * @param[in, out] matcher
9183  *   Flow matcher.
9184  * @param[in, out] key
9185  *   Flow matcher value.
9186  * @param[in] item
9187  *   Flow pattern to translate.
9188  * @param[out] error
9189  *   Pointer to error structure.
9190  *
9191  * @return
9192  *   0 on success, a negative value otherwise.
9193  */
9191 static int
9192 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9193                                   void *key, const struct rte_flow_item *item,
9194                                   struct rte_flow_error *error)
9195 {
9196         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9197         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9198         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9199         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9200         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9201                         misc_parameters_3);
9202         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9203         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9204         int ret = 0;
9205
9206         if (!geneve_opt_v)
9207                 return -1;
9208         if (!geneve_opt_m)
9209                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9210         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9211                                                            error);
9212         if (ret) {
9213                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9214                 return ret;
9215         }
9216         /*
9217          * Set the option length in GENEVE header if not requested.
9218          * The GENEVE TLV option length is expressed by the option length field
9219          * in the GENEVE header.
9220          * If the option length was not requested but the GENEVE TLV option item
9221          * is present we set the option length field implicitly.
9222          */
9223         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9224                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9225                          MLX5_GENEVE_OPTLEN_MASK);
9226                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9227                          geneve_opt_v->option_len + 1);
9228         }
9229         MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
9230         MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
9231         /* Set the data. */
9232         if (geneve_opt_v->data) {
9233                 memcpy(&opt_data_key, geneve_opt_v->data,
9234                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9235                                 sizeof(opt_data_key)));
9236                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9237                                 sizeof(opt_data_key));
9238                 memcpy(&opt_data_mask, geneve_opt_m->data,
9239                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9240                                 sizeof(opt_data_mask)));
9241                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9242                                 sizeof(opt_data_mask));
9243                 MLX5_SET(fte_match_set_misc3, misc3_m,
9244                                 geneve_tlv_option_0_data,
9245                                 rte_be_to_cpu_32(opt_data_mask));
9246                 MLX5_SET(fte_match_set_misc3, misc3_v,
9247                                 geneve_tlv_option_0_data,
9248                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9249         }
9250         return ret;
9251 }
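
/*
 * Editor's sketch (hypothetical helper): option_len is counted in 4-byte
 * words and only the first data dword can be matched, through the
 * geneve_tlv_option_0_data field used above.
 */
static inline uint32_t
mlx5_example_geneve_opt_first_dword(const uint32_t *data, uint8_t option_len)
{
        rte_be32_t dw = 0;

        memcpy(&dw, data, RTE_MIN((uint32_t)(option_len * 4), sizeof(dw)));
        return rte_be_to_cpu_32(dw);
}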
9252
9253 /**
9254  * Add MPLS item to matcher and to the value.
9255  *
9256  * @param[in, out] matcher
9257  *   Flow matcher.
9258  * @param[in, out] key
9259  *   Flow matcher value.
9260  * @param[in] item
9261  *   Flow pattern to translate.
9262  * @param[in] prev_layer
9263  *   The protocol layer indicated in previous item.
9264  * @param[in] inner
9265  *   Item is inner pattern.
9266  */
9267 static void
9268 flow_dv_translate_item_mpls(void *matcher, void *key,
9269                             const struct rte_flow_item *item,
9270                             uint64_t prev_layer,
9271                             int inner)
9272 {
9273         const uint32_t *in_mpls_m = item->mask;
9274         const uint32_t *in_mpls_v = item->spec;
9275         uint32_t *out_mpls_m = NULL;
9276         uint32_t *out_mpls_v = NULL;
9277         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9278         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9279         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9280                                      misc_parameters_2);
9281         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9282         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9283         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9284
9285         switch (prev_layer) {
9286         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9287                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9288                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9289                          MLX5_UDP_PORT_MPLS);
9290                 break;
9291         case MLX5_FLOW_LAYER_GRE:
9292                 /* Fall-through. */
9293         case MLX5_FLOW_LAYER_GRE_KEY:
9294                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9295                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9296                          RTE_ETHER_TYPE_MPLS);
9297                 break;
9298         default:
9299                 break;
9300         }
9301         if (!in_mpls_v)
9302                 return;
9303         if (!in_mpls_m)
9304                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9305         switch (prev_layer) {
9306         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9307                 out_mpls_m =
9308                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9309                                                  outer_first_mpls_over_udp);
9310                 out_mpls_v =
9311                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9312                                                  outer_first_mpls_over_udp);
9313                 break;
9314         case MLX5_FLOW_LAYER_GRE:
9315                 out_mpls_m =
9316                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9317                                                  outer_first_mpls_over_gre);
9318                 out_mpls_v =
9319                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9320                                                  outer_first_mpls_over_gre);
9321                 break;
9322         default:
9323                 /* Inner MPLS not over GRE is not supported. */
9324                 if (!inner) {
9325                         out_mpls_m =
9326                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9327                                                          misc2_m,
9328                                                          outer_first_mpls);
9329                         out_mpls_v =
9330                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9331                                                          misc2_v,
9332                                                          outer_first_mpls);
9333                 }
9334                 break;
9335         }
9336         if (out_mpls_m && out_mpls_v) {
9337                 *out_mpls_m = *in_mpls_m;
9338                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9339         }
9340 }
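
/*
 * Illustrative usage sketch, not part of the driver: an application
 * pattern that reaches the MLX5_FLOW_LAYER_OUTER_L4_UDP branch above.
 * The translation then matches udp_dport == MLX5_UDP_PORT_MPLS
 * implicitly and copies the 32-bit label word into
 * outer_first_mpls_over_udp. The label value is arbitrary.
 */
static __rte_unused void
example_mpls_over_udp_pattern(void)
{
        /* Label 16, S-bit set; label_tc_s holds the first 3 header bytes. */
        struct rte_flow_item_mpls mpls_spec = {
                .label_tc_s = { 0x00, 0x01, 0x01 },
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_MPLS, .spec = &mpls_spec },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        (void)pattern;
}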
9341
9342 /**
9343  * Add metadata register item to matcher
9344  *
9345  * @param[in, out] matcher
9346  *   Flow matcher.
9347  * @param[in, out] key
9348  *   Flow matcher value.
9349  * @param[in] reg_type
9350  *   Type of device metadata register.
9351  * @param[in] data
9352  *   Register data to match.
9353  * @param[in] mask
9354  *   Register mask.
9355  */
9356 static void
9357 flow_dv_match_meta_reg(void *matcher, void *key,
9358                        enum modify_reg reg_type,
9359                        uint32_t data, uint32_t mask)
9360 {
9361         void *misc2_m =
9362                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9363         void *misc2_v =
9364                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9365         uint32_t temp;
9366
9367         data &= mask;
9368         switch (reg_type) {
9369         case REG_A:
9370                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9371                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9372                 break;
9373         case REG_B:
9374                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9375                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9376                 break;
9377         case REG_C_0:
9378                 /*
9379                  * The metadata register C0 field might be divided into
9380                  * source vport index and META item value, we should set
9381                  * this field according to specified mask, not as whole one.
9382                  */
9383                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9384                 temp |= mask;
9385                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9386                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9387                 temp &= ~mask;
9388                 temp |= data;
9389                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9390                 break;
9391         case REG_C_1:
9392                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9393                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9394                 break;
9395         case REG_C_2:
9396                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9397                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9398                 break;
9399         case REG_C_3:
9400                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9401                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9402                 break;
9403         case REG_C_4:
9404                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9405                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9406                 break;
9407         case REG_C_5:
9408                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9409                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9410                 break;
9411         case REG_C_6:
9412                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9413                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9414                 break;
9415         case REG_C_7:
9416                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9417                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9418                 break;
9419         default:
9420                 MLX5_ASSERT(false);
9421                 break;
9422         }
9423 }
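
/*
 * Worked example (a sketch, not driver code) of the REG_C_0
 * read-modify-write above: if the matcher already holds the vport half
 * (mask 0xffff0000, data 0x12340000) and a META match arrives for the
 * other half (mask 0x0000ffff, data 0x00005678), the masks are OR-ed and
 * only the newly masked bits of the value are overwritten.
 */
static __rte_unused void
example_regc0_merge(void)
{
        uint32_t old_mask = 0xffff0000, old_data = 0x12340000;
        uint32_t new_mask = 0x0000ffff, new_data = 0x00005678;
        uint32_t merged_mask = old_mask | new_mask; /* 0xffffffff */
        uint32_t merged_data = (old_data & ~new_mask) | new_data;
        /* merged_data == 0x12345678, the vport half is preserved. */

        (void)merged_mask;
        (void)merged_data;
}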
9424
9425 /**
9426  * Add MARK item to matcher
9427  *
9428  * @param[in] dev
9429  *   The device to configure through.
9430  * @param[in, out] matcher
9431  *   Flow matcher.
9432  * @param[in, out] key
9433  *   Flow matcher value.
9434  * @param[in] item
9435  *   Flow pattern to translate.
9436  */
9437 static void
9438 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9439                             void *matcher, void *key,
9440                             const struct rte_flow_item *item)
9441 {
9442         struct mlx5_priv *priv = dev->data->dev_private;
9443         const struct rte_flow_item_mark *mark;
9444         uint32_t value;
9445         uint32_t mask;
9446
9447         mark = item->mask ? (const void *)item->mask :
9448                             &rte_flow_item_mark_mask;
9449         mask = mark->id & priv->sh->dv_mark_mask;
9450         mark = (const void *)item->spec;
9451         MLX5_ASSERT(mark);
9452         value = mark->id & priv->sh->dv_mark_mask & mask;
9453         if (mask) {
9454                 enum modify_reg reg;
9455
9456                 /* Get the metadata register index for the mark. */
9457                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9458                 MLX5_ASSERT(reg > 0);
9459                 if (reg == REG_C_0) {
9460                         struct mlx5_priv *priv = dev->data->dev_private;
9461                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9462                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9463
9464                         mask &= msk_c0;
9465                         mask <<= shl_c0;
9466                         value <<= shl_c0;
9467                 }
9468                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9469         }
9470 }
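
/*
 * Illustrative usage sketch, not part of the driver: a MARK item as an
 * application would build it; the translation above maps the id into the
 * metadata register returned by mlx5_flow_get_reg_id(). The id value is
 * arbitrary.
 */
static __rte_unused void
example_mark_item(void)
{
        struct rte_flow_item_mark mark_spec = { .id = 42 };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_MARK,
                .spec = &mark_spec,
                .mask = &rte_flow_item_mark_mask,
        };

        (void)item;
}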
9471
9472 /**
9473  * Add META item to matcher
9474  *
9475  * @param[in] dev
9476  *   The device to configure through.
9477  * @param[in, out] matcher
9478  *   Flow matcher.
9479  * @param[in, out] key
9480  *   Flow matcher value.
9481  * @param[in] attr
9482  *   Attributes of flow that includes this item.
9483  * @param[in] item
9484  *   Flow pattern to translate.
9485  */
9486 static void
9487 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9488                             void *matcher, void *key,
9489                             const struct rte_flow_attr *attr,
9490                             const struct rte_flow_item *item)
9491 {
9492         const struct rte_flow_item_meta *meta_m;
9493         const struct rte_flow_item_meta *meta_v;
9494
9495         meta_m = (const void *)item->mask;
9496         if (!meta_m)
9497                 meta_m = &rte_flow_item_meta_mask;
9498         meta_v = (const void *)item->spec;
9499         if (meta_v) {
9500                 int reg;
9501                 uint32_t value = meta_v->data;
9502                 uint32_t mask = meta_m->data;
9503
9504                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9505                 if (reg < 0)
9506                         return;
9507                 MLX5_ASSERT(reg != REG_NON);
9508                 if (reg == REG_C_0) {
9509                         struct mlx5_priv *priv = dev->data->dev_private;
9510                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9511                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9512
9513                         mask &= msk_c0;
9514                         mask <<= shl_c0;
9515                         value <<= shl_c0;
9516                 }
9517                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9518         }
9519 }
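
/*
 * Illustrative usage sketch, not part of the driver: a META item that
 * matches metadata previously set by the SET_META action or Tx metadata;
 * the data value is arbitrary.
 */
static __rte_unused void
example_meta_item(void)
{
        struct rte_flow_item_meta meta_spec = { .data = 0xcafe };
        struct rte_flow_item_meta meta_mask = { .data = UINT32_MAX };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_META,
                .spec = &meta_spec,
                .mask = &meta_mask,
        };

        (void)item;
}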
9520
9521 /**
9522  * Add vport metadata Reg C0 item to matcher
9523  *
9524  * @param[in, out] matcher
9525  *   Flow matcher.
9526  * @param[in, out] key
9527  *   Flow matcher value.
9528  * @param[in] value
9529  *   REG_C_0 value to match.
9530  * @param[in] mask
9531  *   REG_C_0 mask.
9530  */
9531 static void
9532 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9533                                   uint32_t value, uint32_t mask)
9534 {
9535         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9536 }
9537
9538 /**
9539  * Add tag item to matcher
9540  *
9541  * @param[in] dev
9542  *   The device to configure through.
9543  * @param[in, out] matcher
9544  *   Flow matcher.
9545  * @param[in, out] key
9546  *   Flow matcher value.
9547  * @param[in] item
9548  *   Flow pattern to translate.
9549  */
9550 static void
9551 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9552                                 void *matcher, void *key,
9553                                 const struct rte_flow_item *item)
9554 {
9555         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9556         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9557         uint32_t mask, value;
9558
9559         MLX5_ASSERT(tag_v);
9560         value = tag_v->data;
9561         mask = tag_m ? tag_m->data : UINT32_MAX;
9562         if (tag_v->id == REG_C_0) {
9563                 struct mlx5_priv *priv = dev->data->dev_private;
9564                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9565                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9566
9567                 mask &= msk_c0;
9568                 mask <<= shl_c0;
9569                 value <<= shl_c0;
9570         }
9571         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9572 }
9573
9574 /**
9575  * Add TAG item to matcher
9576  *
9577  * @param[in] dev
9578  *   The device to configure through.
9579  * @param[in, out] matcher
9580  *   Flow matcher.
9581  * @param[in, out] key
9582  *   Flow matcher value.
9583  * @param[in] item
9584  *   Flow pattern to translate.
9585  */
9586 static void
9587 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9588                            void *matcher, void *key,
9589                            const struct rte_flow_item *item)
9590 {
9591         const struct rte_flow_item_tag *tag_v = item->spec;
9592         const struct rte_flow_item_tag *tag_m = item->mask;
9593         enum modify_reg reg;
9594
9595         MLX5_ASSERT(tag_v);
9596         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9597         /* Get the metadata register index for the tag. */
9598         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9599         MLX5_ASSERT(reg > 0);
9600         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9601 }
9602
9603 /**
9604  * Add source vport match to the specified matcher.
9605  *
9606  * @param[in, out] matcher
9607  *   Flow matcher.
9608  * @param[in, out] key
9609  *   Flow matcher value.
9610  * @param[in] port
9611  *   Source vport value to match.
9612  * @param[in] mask
9613  *   Mask of the source vport value.
9614  */
9615 static void
9616 flow_dv_translate_item_source_vport(void *matcher, void *key,
9617                                     int16_t port, uint16_t mask)
9618 {
9619         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9620         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9621
9622         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9623         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9624 }
9625
9626 /**
9627  * Translate port-id item to eswitch match on port-id.
9628  *
9629  * @param[in] dev
9630  *   The device to configure through.
9631  * @param[in, out] matcher
9632  *   Flow matcher.
9633  * @param[in, out] key
9634  *   Flow matcher value.
9635  * @param[in] item
9636  *   Flow pattern to translate.
9637  * @param[in] attr
9638  *   Flow attributes.
9639  *
9640  * @return
9641  *   0 on success, a negative errno value otherwise.
9642  */
9643 static int
9644 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9645                                void *key, const struct rte_flow_item *item,
9646                                const struct rte_flow_attr *attr)
9647 {
9648         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9649         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9650         struct mlx5_priv *priv;
9651         uint16_t mask, id;
9652
9653         if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {
9654                 flow_dv_translate_item_source_vport(matcher, key,
9655                         flow_dv_get_esw_manager_vport_id(dev), 0xffff);
9656                 return 0;
9657         }
9658         mask = pid_m ? pid_m->id : 0xffff;
9659         id = pid_v ? pid_v->id : dev->data->port_id;
9660         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9661         if (!priv)
9662                 return -rte_errno;
9663         /*
9664          * Translate to vport field or to metadata, depending on mode.
9665          * Kernel can use either misc.source_port or half of C0 metadata
9666          * register.
9667          */
9668         if (priv->vport_meta_mask) {
9669                 /*
9670                  * Provide the hint for SW steering library
9671                  * to insert the flow into ingress domain and
9672                  * save the extra vport match.
9673                  */
9674                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9675                     priv->pf_bond < 0 && attr->transfer)
9676                         flow_dv_translate_item_source_vport
9677                                 (matcher, key, priv->vport_id, mask);
9678                 /*
9679                  * We should always set the vport metadata register,
9680                  * otherwise the SW steering library can drop
9681                  * the rule if wire vport metadata value is not zero,
9682                  * it depends on kernel configuration.
9683                  */
9684                 flow_dv_translate_item_meta_vport(matcher, key,
9685                                                   priv->vport_meta_tag,
9686                                                   priv->vport_meta_mask);
9687         } else {
9688                 flow_dv_translate_item_source_vport(matcher, key,
9689                                                     priv->vport_id, mask);
9690         }
9691         return 0;
9692 }
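
/*
 * Illustrative usage sketch, not part of the driver: a transfer-rule
 * PORT_ID item. A regular DPDK port id is translated into the vport (or
 * vport metadata) match above, while the special MLX5_PORT_ESW_MGR id
 * selects the E-Switch manager vport instead.
 */
static __rte_unused void
example_port_id_item(uint16_t dpdk_port_id)
{
        struct rte_flow_item_port_id pid_spec = { .id = dpdk_port_id };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
                .spec = &pid_spec,
                .mask = &rte_flow_item_port_id_mask,
        };

        (void)item;
}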
9693
9694 /**
9695  * Add ICMP6 item to matcher and to the value.
9696  *
9697  * @param[in, out] matcher
9698  *   Flow matcher.
9699  * @param[in, out] key
9700  *   Flow matcher value.
9701  * @param[in] item
9702  *   Flow pattern to translate.
9703  * @param[in] inner
9704  *   Item is inner pattern.
9705  */
9706 static void
9707 flow_dv_translate_item_icmp6(void *matcher, void *key,
9708                               const struct rte_flow_item *item,
9709                               int inner)
9710 {
9711         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9712         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9713         void *headers_m;
9714         void *headers_v;
9715         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9716                                      misc_parameters_3);
9717         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9718         if (inner) {
9719                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9720                                          inner_headers);
9721                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9722         } else {
9723                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9724                                          outer_headers);
9725                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9726         }
9727         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9728         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9729         if (!icmp6_v)
9730                 return;
9731         if (!icmp6_m)
9732                 icmp6_m = &rte_flow_item_icmp6_mask;
9733         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9734         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9735                  icmp6_v->type & icmp6_m->type);
9736         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9737         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9738                  icmp6_v->code & icmp6_m->code);
9739 }
9740
9741 /**
9742  * Add ICMP item to matcher and to the value.
9743  *
9744  * @param[in, out] matcher
9745  *   Flow matcher.
9746  * @param[in, out] key
9747  *   Flow matcher value.
9748  * @param[in] item
9749  *   Flow pattern to translate.
9750  * @param[in] inner
9751  *   Item is inner pattern.
9752  */
9753 static void
9754 flow_dv_translate_item_icmp(void *matcher, void *key,
9755                             const struct rte_flow_item *item,
9756                             int inner)
9757 {
9758         const struct rte_flow_item_icmp *icmp_m = item->mask;
9759         const struct rte_flow_item_icmp *icmp_v = item->spec;
9760         uint32_t icmp_header_data_m = 0;
9761         uint32_t icmp_header_data_v = 0;
9762         void *headers_m;
9763         void *headers_v;
9764         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9765                                      misc_parameters_3);
9766         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9767         if (inner) {
9768                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9769                                          inner_headers);
9770                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9771         } else {
9772                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9773                                          outer_headers);
9774                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9775         }
9776         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9777         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9778         if (!icmp_v)
9779                 return;
9780         if (!icmp_m)
9781                 icmp_m = &rte_flow_item_icmp_mask;
9782         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9783                  icmp_m->hdr.icmp_type);
9784         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9785                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9786         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9787                  icmp_m->hdr.icmp_code);
9788         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9789                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9790         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9791         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9792         if (icmp_header_data_m) {
9793                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9794                 icmp_header_data_v |=
9795                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9796                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9797                          icmp_header_data_m);
9798                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9799                          icmp_header_data_v & icmp_header_data_m);
9800         }
9801 }
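
/*
 * Sketch of the icmp_header_data packing above: the PRM field is one
 * 32-bit word holding the sequence number in the low 16 bits and the
 * identifier in the high 16 bits, both converted to CPU order.
 */
static __rte_unused uint32_t
example_icmp_header_data(rte_be16_t ident, rte_be16_t seq_nb)
{
        return (uint32_t)rte_be_to_cpu_16(seq_nb) |
               ((uint32_t)rte_be_to_cpu_16(ident) << 16);
}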
9802
9803 /**
9804  * Add GTP item to matcher and to the value.
9805  *
9806  * @param[in, out] matcher
9807  *   Flow matcher.
9808  * @param[in, out] key
9809  *   Flow matcher value.
9810  * @param[in] item
9811  *   Flow pattern to translate.
9812  * @param[in] inner
9813  *   Item is inner pattern.
9814  */
9815 static void
9816 flow_dv_translate_item_gtp(void *matcher, void *key,
9817                            const struct rte_flow_item *item, int inner)
9818 {
9819         const struct rte_flow_item_gtp *gtp_m = item->mask;
9820         const struct rte_flow_item_gtp *gtp_v = item->spec;
9821         void *headers_m;
9822         void *headers_v;
9823         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9824                                      misc_parameters_3);
9825         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9826         uint16_t dport = RTE_GTPU_UDP_PORT;
9827
9828         if (inner) {
9829                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9830                                          inner_headers);
9831                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9832         } else {
9833                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9834                                          outer_headers);
9835                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9836         }
9837         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9838                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9839                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9840         }
9841         if (!gtp_v)
9842                 return;
9843         if (!gtp_m)
9844                 gtp_m = &rte_flow_item_gtp_mask;
9845         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9846                  gtp_m->v_pt_rsv_flags);
9847         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9848                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9849         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9850         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9851                  gtp_v->msg_type & gtp_m->msg_type);
9852         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9853                  rte_be_to_cpu_32(gtp_m->teid));
9854         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9855                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9856 }
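
/*
 * Illustrative usage sketch, not part of the driver: matching a GTP-U
 * tunnel by TEID. The translation above also adds an implicit match on
 * UDP destination port RTE_GTPU_UDP_PORT when none was set yet. The TEID
 * value is arbitrary.
 */
static __rte_unused void
example_gtpu_teid_item(void)
{
        struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(1234) };
        struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(UINT32_MAX) };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_GTP,
                .spec = &gtp_spec,
                .mask = &gtp_mask,
        };

        (void)item;
}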
9857
9858 /**
9859  * Add GTP PSC item to matcher.
9860  *
9861  * @param[in, out] matcher
9862  *   Flow matcher.
9863  * @param[in, out] key
9864  *   Flow matcher value.
9865  * @param[in] item
9866  *   Flow pattern to translate.
9867  *
9868  * @return
9869  *   0 on success.
9870  */
9868 static int
9869 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9870                                const struct rte_flow_item *item)
9871 {
9872         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9873         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9874         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9875                         misc_parameters_3);
9876         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9877         union {
9878                 uint32_t w32;
9879                 struct {
9880                         uint16_t seq_num;
9881                         uint8_t npdu_num;
9882                         uint8_t next_ext_header_type;
9883                 };
9884         } dw_2;
9885         uint8_t gtp_flags;
9886
9887         /* Always set E-flag match on one, regardless of GTP item settings. */
9888         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9889         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9890         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9891         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9892         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9893         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9894         /* Set next extension header type. */
9895         dw_2.seq_num = 0;
9896         dw_2.npdu_num = 0;
9897         dw_2.next_ext_header_type = 0xff;
9898         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9899                  rte_cpu_to_be_32(dw_2.w32));
9900         dw_2.seq_num = 0;
9901         dw_2.npdu_num = 0;
9902         dw_2.next_ext_header_type = 0x85;
9903         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9904                  rte_cpu_to_be_32(dw_2.w32));
9905         if (gtp_psc_v) {
9906                 union {
9907                         uint32_t w32;
9908                         struct {
9909                                 uint8_t len;
9910                                 uint8_t type_flags;
9911                                 uint8_t qfi;
9912                                 uint8_t reserved;
9913                         };
9914                 } dw_0;
9915
9916                 /* Set extension header PDU type and QoS. */
9917                 if (!gtp_psc_m)
9918                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9919                 dw_0.w32 = 0;
9920                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);
9921                 dw_0.qfi = gtp_psc_m->hdr.qfi;
9922                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9923                          rte_cpu_to_be_32(dw_0.w32));
9924                 dw_0.w32 = 0;
9925                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &
9926                                                         gtp_psc_m->hdr.type);
9927                 dw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;
9928                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9929                          rte_cpu_to_be_32(dw_0.w32));
9930         }
9931         return 0;
9932 }
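
/*
 * Worked example (a sketch) of the dw_2 packing above, assuming a
 * little-endian host as the anonymous-struct layout does: with zero
 * sequence and N-PDU numbers and next_ext_header_type = 0x85, w32 equals
 * 0x85000000 and rte_cpu_to_be_32() yields 0x00000085, placing the
 * extension type in the last byte of the GTP-U dword as on the wire.
 */
static __rte_unused uint32_t
example_gtpu_dw2(void)
{
        union {
                uint32_t w32;
                struct {
                        uint16_t seq_num;
                        uint8_t npdu_num;
                        uint8_t next_ext_header_type;
                };
        } dw_2 = { .w32 = 0 };

        dw_2.next_ext_header_type = 0x85; /* PDU session container type. */
        return rte_cpu_to_be_32(dw_2.w32);
}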
9933
9934 /**
9935  * Add eCPRI item to matcher and to the value.
9936  *
9937  * @param[in] dev
9938  *   The device to configure through.
9939  * @param[in, out] matcher
9940  *   Flow matcher.
9941  * @param[in, out] key
9942  *   Flow matcher value.
9943  * @param[in] item
9944  *   Flow pattern to translate.
9945  * @param[in] last_item
9946  *   Last item flags.
9947  */
9948 static void
9949 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9950                              void *key, const struct rte_flow_item *item,
9951                              uint64_t last_item)
9952 {
9953         struct mlx5_priv *priv = dev->data->dev_private;
9954         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9955         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9956         struct rte_ecpri_common_hdr common;
9957         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9958                                      misc_parameters_4);
9959         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9960         uint32_t *samples;
9961         void *dw_m;
9962         void *dw_v;
9963
9964         /*
9965          * In case of eCPRI over Ethernet, if EtherType is not specified,
9966          * match on eCPRI EtherType implicitly.
9967          */
9968         if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
9969                 void *hdrs_m, *hdrs_v, *l2m, *l2v;
9970
9971                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9972                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9973                 l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
9974                 l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
9975                 if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
9976                         *(uint16_t *)l2m = UINT16_MAX;
9977                         *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
9978                 }
9979         }
9980         if (!ecpri_v)
9981                 return;
9982         if (!ecpri_m)
9983                 ecpri_m = &rte_flow_item_ecpri_mask;
9984         /*
9985          * At most four DW samples are supported in a single matcher
9986          * for now. Two of them are used for eCPRI matching:
9987          * 1. Type: one byte, the mask should be 0x00ff0000 in network order.
9988          * 2. Message ID: one or two bytes, mask 0xffff0000 or 0xff000000
9989          *    respectively, if any.
9990          */
9991         if (!ecpri_m->hdr.common.u32)
9992                 return;
9993         samples = priv->sh->ecpri_parser.ids;
9994         /* Need to take the whole DW as the mask to fill the entry. */
9995         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9996                             prog_sample_field_value_0);
9997         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9998                             prog_sample_field_value_0);
9999         /* Already big endian (network order) in the header. */
10000         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
10001         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
10002         /* Sample#0, used for matching type, offset 0. */
10003         MLX5_SET(fte_match_set_misc4, misc4_m,
10004                  prog_sample_field_id_0, samples[0]);
10005         /* It makes no sense to set the sample ID in the mask field. */
10006         MLX5_SET(fte_match_set_misc4, misc4_v,
10007                  prog_sample_field_id_0, samples[0]);
10008         /*
10009          * Check whether the message body part needs to be matched.
10010          * Wildcard rules matching only the type field must also be supported.
10011          */
10012         if (ecpri_m->hdr.dummy[0]) {
10013                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
10014                 switch (common.type) {
10015                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
10016                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
10017                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
10018                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
10019                                             prog_sample_field_value_1);
10020                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
10021                                             prog_sample_field_value_1);
10022                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
10023                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
10024                                             ecpri_m->hdr.dummy[0];
10025                         /* Sample#1, to match message body, offset 4. */
10026                         MLX5_SET(fte_match_set_misc4, misc4_m,
10027                                  prog_sample_field_id_1, samples[1]);
10028                         MLX5_SET(fte_match_set_misc4, misc4_v,
10029                                  prog_sample_field_id_1, samples[1]);
10030                         break;
10031                 default:
10032                         /* Others, do not match any sample ID. */
10033                         break;
10034                 }
10035         }
10036 }
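
/*
 * Illustrative usage sketch, not part of the driver: an eCPRI IQ-data
 * match. Per the comment above, the type mask occupies byte 1 of the
 * common header word (0x00ff0000 in network order) and the first body
 * word (dummy[0]) carries PC_ID/SEQ_ID for message type #0. The values
 * are arbitrary.
 */
static __rte_unused void
example_ecpri_iq_data_item(void)
{
        struct rte_flow_item_ecpri ecpri_spec;
        struct rte_flow_item_ecpri ecpri_mask;

        memset(&ecpri_spec, 0, sizeof(ecpri_spec));
        memset(&ecpri_mask, 0, sizeof(ecpri_mask));
        ecpri_spec.hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
        ecpri_spec.hdr.dummy[0] = RTE_BE32(0x12340000); /* PC_ID = 0x1234. */
        ecpri_mask.hdr.common.u32 = RTE_BE32(0x00ff0000); /* Type only. */
        ecpri_mask.hdr.dummy[0] = RTE_BE32(0xffff0000); /* PC_ID only. */

        (void)ecpri_spec;
        (void)ecpri_mask;
}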
10037
10038 /**
10039  * Add connection tracking status item to matcher
10040  *
10041  * @param[in] dev
10042  *   The device to configure through.
10043  * @param[in, out] matcher
10044  *   Flow matcher.
10045  * @param[in, out] key
10046  *   Flow matcher value.
10047  * @param[in] item
10048  *   Flow pattern to translate.
10049  */
10050 static void
10051 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
10052                               void *matcher, void *key,
10053                               const struct rte_flow_item *item)
10054 {
10055         uint32_t reg_value = 0;
10056         int reg_id;
10057         /* The 8 LSBs form 0b11000011; the middle 4 bits are reserved. */
10058         uint32_t reg_mask = 0;
10059         const struct rte_flow_item_conntrack *spec = item->spec;
10060         const struct rte_flow_item_conntrack *mask = item->mask;
10061         uint32_t flags;
10062         struct rte_flow_error error;
10063
10064         if (!mask)
10065                 mask = &rte_flow_item_conntrack_mask;
10066         if (!spec || !mask->flags)
10067                 return;
10068         flags = spec->flags & mask->flags;
10069         /* The conflict should be checked in the validation. */
10070         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
10071                 reg_value |= MLX5_CT_SYNDROME_VALID;
10072         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10073                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
10074         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10075                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10076         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10077                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10078         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10079                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10080         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10081                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10082                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10083                 reg_mask |= 0xc0;
10084         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10085                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10086         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10087                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10088         /* The REG_C_x value could be saved during startup. */
10089         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10090         if (reg_id == REG_NON)
10091                 return;
10092         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10093                                reg_value, reg_mask);
10094 }
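
/*
 * Illustrative usage sketch, not part of the driver: matching packets
 * classified as valid by connection tracking; the translation above
 * turns the flag into the MLX5_CT_SYNDROME_VALID bits of the ASO
 * register match.
 */
static __rte_unused void
example_conntrack_item(void)
{
        struct rte_flow_item_conntrack ct_spec = {
                .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
        };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
                .spec = &ct_spec,
                .mask = &rte_flow_item_conntrack_mask,
        };

        (void)item;
}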
10095
10096 static void
10097 flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
10098                             const struct rte_flow_item *item,
10099                             struct mlx5_flow *dev_flow, bool is_inner)
10100 {
10101         const struct rte_flow_item_flex *spec =
10102                 (const struct rte_flow_item_flex *)item->spec;
10103         int index = mlx5_flex_acquire_index(dev, spec->handle, false);
10104
10105         MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
10106         if (index < 0)
10107                 return;
10108         if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
10109                 /* Don't count both inner and outer flex items in one rule. */
10110                 if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
10111                         MLX5_ASSERT(false);
10112                 dev_flow->handle->flex_item |= RTE_BIT32(index);
10113         }
10114         mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
10115 }
10116
10117 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
10118
10119 #define HEADER_IS_ZERO(match_criteria, headers)                              \
10120         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
10121                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
10122
10123 /**
10124  * Calculate flow matcher enable bitmap.
10125  *
10126  * @param match_criteria
10127  *   Pointer to flow matcher criteria.
10128  *
10129  * @return
10130  *   Bitmap of enabled fields.
10131  */
10132 static uint8_t
10133 flow_dv_matcher_enable(uint32_t *match_criteria)
10134 {
10135         uint8_t match_criteria_enable;
10136
10137         match_criteria_enable =
10138                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10139                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10140         match_criteria_enable |=
10141                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10142                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10143         match_criteria_enable |=
10144                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10145                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10146         match_criteria_enable |=
10147                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10148                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10149         match_criteria_enable |=
10150                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10151                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10152         match_criteria_enable |=
10153                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10154                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10155         match_criteria_enable |=
10156                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10157                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10158         return match_criteria_enable;
10159 }
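
/*
 * Sketch: a matcher mask touching only the outer headers and misc2
 * (e.g. an outer 5-tuple plus a metadata register) yields the bitmap
 * below; all other criteria stay disabled.
 */
static __rte_unused uint8_t
example_outer_misc2_criteria(void)
{
        return (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
               (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
}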
10160
10161 static void
10162 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10163 {
10164         /*
10165          * Check flow matching criteria first, subtract misc5/4 length if flow
10166          * doesn't own misc5/4 parameters. In some old rdma-core releases,
10167          * misc5/4 are not supported, and matcher creation failure is expected
10168          * w/o subtration. If misc5 is provided, misc4 must be counted in since
10169          * misc5 is right after misc4.
10170          */
10171         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10172                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10173                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10174                 if (!(match_criteria & (1 <<
10175                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10176                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10177                 }
10178         }
10179 }
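
/*
 * Usage sketch for the helper above: with neither misc4 nor misc5
 * enabled in the criteria, both trailing parameter sets are trimmed from
 * the matcher buffer size, keeping old rdma-core releases working.
 */
static __rte_unused size_t
example_adjusted_buf_size(void)
{
        size_t size = MLX5_ST_SZ_BYTES(fte_match_param);
        uint8_t criteria = 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;

        __flow_dv_adjust_buf_size(&size, criteria);
        /* size == fte_match_param size minus misc5 and misc4 sizes. */
        return size;
}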
10180
10181 static struct mlx5_list_entry *
10182 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10183                          struct mlx5_list_entry *entry, void *cb_ctx)
10184 {
10185         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10186         struct mlx5_flow_dv_matcher *ref = ctx->data;
10187         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10188                                                             typeof(*tbl), tbl);
10189         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10190                                                             sizeof(*resource),
10191                                                             0, SOCKET_ID_ANY);
10192
10193         if (!resource) {
10194                 rte_flow_error_set(ctx->error, ENOMEM,
10195                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10196                                    "cannot create matcher");
10197                 return NULL;
10198         }
10199         memcpy(resource, entry, sizeof(*resource));
10200         resource->tbl = &tbl->tbl;
10201         return &resource->entry;
10202 }
10203
10204 static void
10205 flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
10206                              struct mlx5_list_entry *entry)
10207 {
10208         mlx5_free(entry);
10209 }
10210
10211 struct mlx5_list_entry *
10212 flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
10213 {
10214         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10215         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10216         struct rte_eth_dev *dev = ctx->dev;
10217         struct mlx5_flow_tbl_data_entry *tbl_data;
10218         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
10219         struct rte_flow_error *error = ctx->error;
10220         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10221         struct mlx5_flow_tbl_resource *tbl;
10222         void *domain;
10223         uint32_t idx = 0;
10224         int ret;
10225
10226         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10227         if (!tbl_data) {
10228                 rte_flow_error_set(error, ENOMEM,
10229                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10230                                    NULL,
10231                                    "cannot allocate flow table data entry");
10232                 return NULL;
10233         }
10234         tbl_data->idx = idx;
10235         tbl_data->tunnel = tt_prm->tunnel;
10236         tbl_data->group_id = tt_prm->group_id;
10237         tbl_data->external = !!tt_prm->external;
10238         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10239         tbl_data->is_egress = !!key.is_egress;
10240         tbl_data->is_transfer = !!key.is_fdb;
10241         tbl_data->dummy = !!key.dummy;
10242         tbl_data->level = key.level;
10243         tbl_data->id = key.id;
10244         tbl = &tbl_data->tbl;
10245         if (key.dummy)
10246                 return &tbl_data->entry;
10247         if (key.is_fdb)
10248                 domain = sh->fdb_domain;
10249         else if (key.is_egress)
10250                 domain = sh->tx_domain;
10251         else
10252                 domain = sh->rx_domain;
10253         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10254         if (ret) {
10255                 rte_flow_error_set(error, ENOMEM,
10256                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10257                                    NULL, "cannot create flow table object");
10258                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10259                 return NULL;
10260         }
10261         if (key.level != 0) {
10262                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10263                                         (tbl->obj, &tbl_data->jump.action);
10264                 if (ret) {
10265                         rte_flow_error_set(error, ENOMEM,
10266                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10267                                            NULL,
10268                                            "cannot create flow jump action");
10269                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10270                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10271                         return NULL;
10272                 }
10273         }
10274         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10275               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10276               key.level, key.id);
10277         tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
10278                                               flow_dv_matcher_create_cb,
10279                                               flow_dv_matcher_match_cb,
10280                                               flow_dv_matcher_remove_cb,
10281                                               flow_dv_matcher_clone_cb,
10282                                               flow_dv_matcher_clone_free_cb);
10283         if (!tbl_data->matchers) {
10284                 rte_flow_error_set(error, ENOMEM,
10285                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10286                                    NULL,
10287                                    "cannot create tbl matcher list");
10288                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10289                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10290                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10291                 return NULL;
10292         }
10293         return &tbl_data->entry;
10294 }
10295
10296 int
10297 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10298                      void *cb_ctx)
10299 {
10300         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10301         struct mlx5_flow_tbl_data_entry *tbl_data =
10302                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10303         union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
10304
10305         return tbl_data->level != key.level ||
10306                tbl_data->id != key.id ||
10307                tbl_data->dummy != key.dummy ||
10308                tbl_data->is_transfer != !!key.is_fdb ||
10309                tbl_data->is_egress != !!key.is_egress;
10310 }
10311
10312 struct mlx5_list_entry *
10313 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10314                       void *cb_ctx)
10315 {
10316         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10317         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10318         struct mlx5_flow_tbl_data_entry *tbl_data;
10319         struct rte_flow_error *error = ctx->error;
10320         uint32_t idx = 0;
10321
10322         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10323         if (!tbl_data) {
10324                 rte_flow_error_set(error, ENOMEM,
10325                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10326                                    NULL,
10327                                    "cannot allocate flow table data entry");
10328                 return NULL;
10329         }
10330         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10331         tbl_data->idx = idx;
10332         return &tbl_data->entry;
10333 }
10334
10335 void
10336 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10337 {
10338         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10339         struct mlx5_flow_tbl_data_entry *tbl_data =
10340                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10341
10342         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10343 }
10344
10345 /**
10346  * Get a flow table.
10347  *
10348  * @param[in, out] dev
10349  *   Pointer to rte_eth_dev structure.
10350  * @param[in] table_level
10351  *   Table level to use.
10352  * @param[in] egress
10353  *   Direction of the table.
10354  * @param[in] transfer
10355  *   E-Switch or NIC flow.
10356  * @param[in] external
10357  *   Indicates whether the table is used by application-created flows.
10358  * @param[in] tunnel
10359  *   Tunnel offload context, NULL if none.
10360  * @param[in] group_id
10361  *   Group id used for tunnel offload bookkeeping.
10356  * @param[in] dummy
10357  *   Dummy entry for dv API.
10358  * @param[in] table_id
10359  *   Table id to use.
10360  * @param[out] error
10361  *   pointer to error structure.
10362  *
10363  * @return
10364  *   Returns the table resource, NULL in case of failure.
10365  */
10366 struct mlx5_flow_tbl_resource *
10367 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10368                          uint32_t table_level, uint8_t egress,
10369                          uint8_t transfer,
10370                          bool external,
10371                          const struct mlx5_flow_tunnel *tunnel,
10372                          uint32_t group_id, uint8_t dummy,
10373                          uint32_t table_id,
10374                          struct rte_flow_error *error)
10375 {
10376         struct mlx5_priv *priv = dev->data->dev_private;
10377         union mlx5_flow_tbl_key table_key = {
10378                 {
10379                         .level = table_level,
10380                         .id = table_id,
10381                         .reserved = 0,
10382                         .dummy = !!dummy,
10383                         .is_fdb = !!transfer,
10384                         .is_egress = !!egress,
10385                 }
10386         };
10387         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10388                 .tunnel = tunnel,
10389                 .group_id = group_id,
10390                 .external = external,
10391         };
10392         struct mlx5_flow_cb_ctx ctx = {
10393                 .dev = dev,
10394                 .error = error,
10395                 .data = &table_key.v64,
10396                 .data2 = &tt_prm,
10397         };
10398         struct mlx5_list_entry *entry;
10399         struct mlx5_flow_tbl_data_entry *tbl_data;
10400
10401         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10402         if (!entry) {
10403                 rte_flow_error_set(error, ENOMEM,
10404                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10405                                    "cannot get table");
10406                 return NULL;
10407         }
10408         DRV_LOG(DEBUG, "table_level %u table_id %u "
10409                 "tunnel %u group %u registered.",
10410                 table_level, table_id,
10411                 tunnel ? tunnel->tunnel_id : 0, group_id);
10412         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10413         return &tbl_data->tbl;
10414 }
10415
10416 void
10417 flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10418 {
10419         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10420         struct mlx5_flow_tbl_data_entry *tbl_data =
10421                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10422
10423         MLX5_ASSERT(entry && sh);
10424         if (tbl_data->jump.action)
10425                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10426         if (tbl_data->tbl.obj)
10427                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10428         if (tbl_data->tunnel_offload && tbl_data->external) {
10429                 struct mlx5_list_entry *he;
10430                 struct mlx5_hlist *tunnel_grp_hash;
10431                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10432                 union tunnel_tbl_key tunnel_key = {
10433                         .tunnel_id = tbl_data->tunnel ?
10434                                         tbl_data->tunnel->tunnel_id : 0,
10435                         .group = tbl_data->group_id
10436                 };
10437                 uint32_t table_level = tbl_data->level;
10438                 struct mlx5_flow_cb_ctx ctx = {
10439                         .data = (void *)&tunnel_key.val,
10440                 };
10441
10442                 tunnel_grp_hash = tbl_data->tunnel ?
10443                                         tbl_data->tunnel->groups :
10444                                         thub->groups;
10445                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
10446                 if (he)
10447                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10448                 DRV_LOG(DEBUG,
10449                         "table_level %u id %u tunnel %u group %u released.",
10450                         table_level,
10451                         tbl_data->id,
10452                         tbl_data->tunnel ?
10453                         tbl_data->tunnel->tunnel_id : 0,
10454                         tbl_data->group_id);
10455         }
10456         mlx5_list_destroy(tbl_data->matchers);
10457         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10458 }
10459
10460 /**
10461  * Release a flow table.
10462  *
10463  * @param[in] sh
10464  *   Pointer to device shared structure.
10465  * @param[in] tbl
10466  *   Table resource to be released.
10467  *
10468  * @return
10469  *   Returns 0 if the table was released, 1 otherwise.
10470  */
10471 static int
10472 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10473                              struct mlx5_flow_tbl_resource *tbl)
10474 {
10475         struct mlx5_flow_tbl_data_entry *tbl_data =
10476                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10477
10478         if (!tbl)
10479                 return 0;
10480         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10481 }
10482
10483 int
10484 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10485                          struct mlx5_list_entry *entry, void *cb_ctx)
10486 {
10487         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10488         struct mlx5_flow_dv_matcher *ref = ctx->data;
10489         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10490                                                         entry);
10491
10492         return cur->crc != ref->crc ||
10493                cur->priority != ref->priority ||
10494                memcmp((const void *)cur->mask.buf,
10495                       (const void *)ref->mask.buf, ref->mask.size);
10496 }
10497
10498 struct mlx5_list_entry *
10499 flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
10500 {
10501         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10502         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10503         struct mlx5_flow_dv_matcher *ref = ctx->data;
10504         struct mlx5_flow_dv_matcher *resource;
10505         struct mlx5dv_flow_matcher_attr dv_attr = {
10506                 .type = IBV_FLOW_ATTR_NORMAL,
10507                 .match_mask = (void *)&ref->mask,
10508         };
10509         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10510                                                             typeof(*tbl), tbl);
10511         int ret;
10512
10513         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10514                                SOCKET_ID_ANY);
10515         if (!resource) {
10516                 rte_flow_error_set(ctx->error, ENOMEM,
10517                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10518                                    "cannot create matcher");
10519                 return NULL;
10520         }
10521         *resource = *ref;
10522         dv_attr.match_criteria_enable =
10523                 flow_dv_matcher_enable(resource->mask.buf);
10524         __flow_dv_adjust_buf_size(&ref->mask.size,
10525                                   dv_attr.match_criteria_enable);
10526         dv_attr.priority = ref->priority;
10527         if (tbl->is_egress)
10528                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10529         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
10530                                                tbl->tbl.obj,
10531                                                &resource->matcher_object);
10532         if (ret) {
10533                 mlx5_free(resource);
10534                 rte_flow_error_set(ctx->error, ENOMEM,
10535                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10536                                    "cannot create matcher");
10537                 return NULL;
10538         }
10539         return &resource->entry;
10540 }
10541
10542 /**
10543  * Register the flow matcher.
10544  *
10545  * @param[in, out] dev
10546  *   Pointer to rte_eth_dev structure.
10547  * @param[in, out] ref
10548  *   Pointer to the flow matcher reference.
10549  * @param[in, out] key
10550  *   Pointer to flow table key.
10551  * @param[in, out] dev_flow
10552  *   Pointer to the dev_flow.
10553  * @param[out] error
10554  *   pointer to error structure.
10555  *
10556  * @return
10557  *   0 on success otherwise -errno and errno is set.
10558  */
10559 static int
10560 flow_dv_matcher_register(struct rte_eth_dev *dev,
10561                          struct mlx5_flow_dv_matcher *ref,
10562                          union mlx5_flow_tbl_key *key,
10563                          struct mlx5_flow *dev_flow,
10564                          const struct mlx5_flow_tunnel *tunnel,
10565                          uint32_t group_id,
10566                          struct rte_flow_error *error)
10567 {
10568         struct mlx5_list_entry *entry;
10569         struct mlx5_flow_dv_matcher *resource;
10570         struct mlx5_flow_tbl_resource *tbl;
10571         struct mlx5_flow_tbl_data_entry *tbl_data;
10572         struct mlx5_flow_cb_ctx ctx = {
10573                 .error = error,
10574                 .data = ref,
10575         };
10576         /*
10577          * The tunnel offload API requires this registration when a
10578          * tunnel match rule is inserted before the tunnel set rule.
10579          */
10580         tbl = flow_dv_tbl_resource_get(dev, key->level,
10581                                        key->is_egress, key->is_fdb,
10582                                        dev_flow->external, tunnel,
10583                                        group_id, 0, key->id, error);
10584         if (!tbl)
10585                 return -rte_errno;      /* No need to refill the error info */
10586         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10587         ref->tbl = tbl;
10588         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10589         if (!entry) {
10590                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10591                 return rte_flow_error_set(error, ENOMEM,
10592                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10593                                           "cannot allocate ref memory");
10594         }
10595         resource = container_of(entry, typeof(*resource), entry);
10596         dev_flow->handle->dvh.matcher = resource;
10597         return 0;
10598 }
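
/*
 * Note on the registration flow above: the matcher keeps a reference on
 * its flow table (ref->tbl) for as long as it lives. mlx5_list_register()
 * either returns an existing matcher, bumping its reference counter, or
 * invokes flow_dv_matcher_create_cb() to build a new one on a cache miss.
 */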

struct mlx5_list_entry *
flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_tag_resource *entry;
        uint32_t idx = 0;
        int ret;

        entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
        if (!entry) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate resource memory");
                return NULL;
        }
        entry->idx = idx;
        entry->tag_id = *(uint32_t *)(ctx->data);
        ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
                                                  &entry->action);
        if (ret) {
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "cannot create action");
                return NULL;
        }
        return &entry->entry;
}

int
flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
                     void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_tag_resource *tag =
                   container_of(entry, struct mlx5_flow_dv_tag_resource, entry);

        return *(uint32_t *)(ctx->data) != tag->tag_id;
}
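
/*
 * Per the mlx5_list convention the match callback returns 0 when the
 * candidate entry matches the lookup context and non-zero otherwise,
 * hence the inverted comparison above.
 */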

struct mlx5_list_entry *
flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
                     void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_tag_resource *entry;
        uint32_t idx = 0;

        entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
        if (!entry) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate tag resource memory");
                return NULL;
        }
        memcpy(entry, oentry, sizeof(*entry));
        entry->idx = idx;
        return &entry->entry;
}

void
flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_dv_tag_resource *tag =
                   container_of(entry, struct mlx5_flow_dv_tag_resource, entry);

        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
}

/**
 * Find existing tag resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] tag_be24
 *   Tag value in big-endian, then right-shifted by 8.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_tag_resource_register
                        (struct rte_eth_dev *dev,
                         uint32_t tag_be24,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_tag_resource *resource;
        struct mlx5_list_entry *entry;
        struct mlx5_flow_cb_ctx ctx = {
                                        .error = error,
                                        .data = &tag_be24,
                                        };
        struct mlx5_hlist *tag_table;

        tag_table = flow_dv_hlist_prepare(priv->sh, &priv->sh->tag_table,
                                      "tags",
                                      MLX5_TAGS_HLIST_ARRAY_SIZE,
                                      false, false, priv->sh,
                                      flow_dv_tag_create_cb,
                                      flow_dv_tag_match_cb,
                                      flow_dv_tag_remove_cb,
                                      flow_dv_tag_clone_cb,
                                      flow_dv_tag_clone_free_cb);
        if (unlikely(!tag_table))
                return -rte_errno;
        entry = mlx5_hlist_register(tag_table, tag_be24, &ctx);
        if (entry) {
                resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
                                        entry);
                dev_flow->handle->dvh.rix_tag = resource->idx;
                dev_flow->dv.tag_resource = resource;
                return 0;
        }
        return -rte_errno;
}
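
/*
 * Typical usage, mirroring the MARK handling in
 * flow_dv_translate_action_sample() below (mark_id here stands for the
 * application-provided MARK action ID):
 *
 *   uint32_t tag_be = mlx5_flow_mark_set(mark_id);
 *
 *   if (flow_dv_tag_resource_register(dev, tag_be, dev_flow, error))
 *           return -rte_errno;
 *
 * The registered action is then available as
 * dev_flow->dv.tag_resource->action.
 */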

void
flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
        struct mlx5_dev_ctx_shared *sh = tool_ctx;
        struct mlx5_flow_dv_tag_resource *tag =
                   container_of(entry, struct mlx5_flow_dv_tag_resource, entry);

        MLX5_ASSERT(tag && sh && tag->action);
        claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
        DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
}

/**
 * Release the tag.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param tag_idx
 *   Tag index.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_tag_release(struct rte_eth_dev *dev,
                    uint32_t tag_idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_tag_resource *tag;

        tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
        if (!tag)
                return 0;
        DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
                dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
        return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
}

/**
 * Translate action PORT_ID / REPRESENTED_PORT to vport.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action PORT_ID / REPRESENTED_PORT.
 * @param[out] dst_port_id
 *   The target port ID.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *action,
                                 uint32_t *dst_port_id,
                                 struct rte_flow_error *error)
{
        uint32_t port;
        struct mlx5_priv *priv;

        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_PORT_ID: {
                const struct rte_flow_action_port_id *conf;

                conf = (const struct rte_flow_action_port_id *)action->conf;
                port = conf->original ? dev->data->port_id : conf->id;
                break;
        }
        case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
                const struct rte_flow_action_ethdev *ethdev;

                ethdev = (const struct rte_flow_action_ethdev *)action->conf;
                port = ethdev->port_id;
                break;
        }
        default:
                MLX5_ASSERT(false);
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "unknown E-Switch action");
        }

        priv = mlx5_port_to_eswitch_info(port, false);
        if (!priv)
                return rte_flow_error_set(error, -rte_errno,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL,
                                          "No eswitch info was found for port");
#ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
        /*
         * This parameter is transferred to
         * mlx5dv_dr_action_create_dest_ib_port().
         */
        *dst_port_id = priv->dev_port;
#else
        /*
         * Legacy mode, no LAG configuration is supported.
         * This parameter is transferred to
         * mlx5dv_dr_action_create_dest_vport().
         */
        *dst_port_id = priv->vport_id;
#endif
        return 0;
}
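
/*
 * In short: PORT_ID with "original" set resolves to the issuing port
 * itself, otherwise to conf->id, while REPRESENTED_PORT always takes the
 * ethdev port ID. The selected port is then mapped either to the IB
 * dev_port (LAG-capable rdma-core) or to the E-Switch vport ID (legacy).
 */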

/**
 * Create a counter with aging configuration.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] count
 *   Pointer to the counter action configuration.
 * @param[in] age
 *   Pointer to the aging action configuration.
 *
 * @return
 *   Index to flow counter on success, 0 otherwise.
 */
static uint32_t
flow_dv_translate_create_counter(struct rte_eth_dev *dev,
                                struct mlx5_flow *dev_flow,
                                const struct rte_flow_action_count *count
                                        __rte_unused,
                                const struct rte_flow_action_age *age)
{
        uint32_t counter;
        struct mlx5_age_param *age_param;

        counter = flow_dv_counter_alloc(dev, !!age);
        if (!counter || age == NULL)
                return counter;
        age_param = flow_dv_counter_idx_get_age(dev, counter);
        age_param->context = age->context ? age->context :
                (void *)(uintptr_t)(dev_flow->flow_idx);
        age_param->timeout = age->timeout;
        age_param->port_id = dev->data->port_id;
        __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
        __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
        return counter;
}
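
/*
 * When an AGE configuration is supplied the counter doubles as the aging
 * trigger: the aging mechanism compares sec_since_last_hit (reset above)
 * against the configured timeout and reports the user context, which
 * defaults to the flow index when none was given.
 */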

/**
 * Add Tx queue matcher.
 *
 * @param[in] dev
 *   Pointer to the dev struct.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
                                void *matcher, void *key,
                                const struct rte_flow_item *item)
{
        const struct mlx5_rte_flow_item_tx_queue *queue_m;
        const struct mlx5_rte_flow_item_tx_queue *queue_v;
        void *misc_m =
                MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v =
                MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        struct mlx5_txq_ctrl *txq;
        uint32_t queue, mask;

        queue_m = (const void *)item->mask;
        queue_v = (const void *)item->spec;
        if (!queue_v)
                return;
        txq = mlx5_txq_get(dev, queue_v->queue);
        if (!txq)
                return;
        if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
                queue = txq->obj->sq->id;
        else
                queue = txq->obj->sq_obj.sq->id;
        mask = queue_m == NULL ? UINT32_MAX : queue_m->queue;
        MLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);
        MLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);
        mlx5_txq_release(dev, queue_v->queue);
}
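
/*
 * The Tx queue item is matched via the underlying send queue number
 * (source_sqn in the misc parameters): hairpin queues expose it through
 * txq->obj->sq, DevX-created queues through txq->obj->sq_obj.sq.
 */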

/**
 * Set the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] rss_desc
 *   Pointer to the mlx5_flow_rss_desc.
 */
static void
flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
                       struct mlx5_flow_rss_desc *rss_desc)
{
        uint64_t items = dev_flow->handle->layers;
        int rss_inner = 0;
        uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);

        dev_flow->hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        if (rss_desc->level >= 2)
                rss_inner = 1;
#endif
        if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
            (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
                if (rss_types & MLX5_IPV4_LAYER_TYPES) {
                        if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
                        else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
                        else
                                dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
                }
        } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
                   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
                if (rss_types & MLX5_IPV6_LAYER_TYPES) {
                        if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
                        else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
                                dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
                        else
                                dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
                }
        }
        if (dev_flow->hash_fields == 0)
                /*
                 * There is no match between the RSS types and the
                 * L3 protocol (IPv4/IPv6) defined in the flow rule.
                 */
                return;
        if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
            (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
                if (rss_types & RTE_ETH_RSS_UDP) {
                        if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_SRC_PORT_UDP;
                        else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_DST_PORT_UDP;
                        else
                                dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
                }
        } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
                   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
                if (rss_types & RTE_ETH_RSS_TCP) {
                        if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_SRC_PORT_TCP;
                        else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
                                dev_flow->hash_fields |=
                                                IBV_RX_HASH_DST_PORT_TCP;
                        else
                                dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
                }
        }
        if (rss_inner)
                dev_flow->hash_fields |= IBV_RX_HASH_INNER;
}
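
/*
 * Worked example: for an outer IPv4/UDP rule with
 * rss_desc->types = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP and level < 2, the
 * function yields MLX5_IPV4_IBV_RX_HASH | MLX5_UDP_IBV_RX_HASH, i.e.
 * hashing on both IPv4 addresses and both UDP ports. Adding
 * RTE_ETH_RSS_L3_SRC_ONLY would narrow the L3 part to
 * IBV_RX_HASH_SRC_IPV4 only.
 */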

/**
 * Prepare an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] rss_desc
 *   Pointer to the mlx5_flow_rss_desc.
 * @param[out] hrxq_idx
 *   Hash Rx queue index.
 *
 * @return
 *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
 */
static struct mlx5_hrxq *
flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
                     struct mlx5_flow *dev_flow,
                     struct mlx5_flow_rss_desc *rss_desc,
                     uint32_t *hrxq_idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_handle *dh = dev_flow->handle;
        struct mlx5_hrxq *hrxq;

        MLX5_ASSERT(rss_desc->queue_num);
        rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
        rss_desc->hash_fields = dev_flow->hash_fields;
        rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
        rss_desc->shared_rss = 0;
        if (rss_desc->hash_fields == 0)
                rss_desc->queue_num = 1;
        *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
        if (!*hrxq_idx)
                return NULL;
        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              *hrxq_idx);
        return hrxq;
}
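
/*
 * Note: when no hash field matched the rule (hash_fields == 0) the queue
 * list is collapsed to a single queue above, so the flow effectively
 * degenerates from RSS to a plain queue fate without spreading.
 */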

/**
 * Release sample sub action resource.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] act_res
 *   Pointer to sample sub action resource.
 */
static void
flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
                                   struct mlx5_flow_sub_actions_idx *act_res)
{
        if (act_res->rix_hrxq) {
                mlx5_hrxq_release(dev, act_res->rix_hrxq);
                act_res->rix_hrxq = 0;
        }
        if (act_res->rix_encap_decap) {
                flow_dv_encap_decap_resource_release(dev,
                                                     act_res->rix_encap_decap);
                act_res->rix_encap_decap = 0;
        }
        if (act_res->rix_port_id_action) {
                flow_dv_port_id_action_resource_release(dev,
                                                act_res->rix_port_id_action);
                act_res->rix_port_id_action = 0;
        }
        if (act_res->rix_tag) {
                flow_dv_tag_release(dev, act_res->rix_tag);
                act_res->rix_tag = 0;
        }
        if (act_res->rix_jump) {
                flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
                act_res->rix_jump = 0;
        }
}
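
/*
 * Each rix_* index above is reference counted by its own registry; this
 * helper only drops the references taken while preparing the sample
 * sub-actions and clears the indices so that a repeated release is a
 * harmless no-op.
 */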

int
flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
                        struct mlx5_list_entry *entry, void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct rte_eth_dev *dev = ctx->dev;
        struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
        struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
                                                              typeof(*resource),
                                                              entry);

        if (ctx_resource->ratio == resource->ratio &&
            ctx_resource->ft_type == resource->ft_type &&
            ctx_resource->ft_id == resource->ft_id &&
            ctx_resource->set_action == resource->set_action &&
            !memcmp((void *)&ctx_resource->sample_act,
                    (void *)&resource->sample_act,
                    sizeof(struct mlx5_flow_sub_actions_list))) {
                /*
                 * The existing sample action is reused, so release the
                 * references taken for the prepared sub-actions.
                 */
                flow_dv_sample_sub_actions_release(dev,
                                                   &ctx_resource->sample_idx);
                return 0;
        }
        return 1;
}

struct mlx5_list_entry *
flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct rte_eth_dev *dev = ctx->dev;
        struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
        void **sample_dv_actions = ctx_resource->sub_actions;
        struct mlx5_flow_dv_sample_resource *resource;
        struct mlx5dv_dr_flow_sampler_attr sampler_attr;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_flow_tbl_resource *tbl;
        uint32_t idx = 0;
        const uint32_t next_ft_step = 1;
        uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
        uint8_t is_egress = 0;
        uint8_t is_transfer = 0;
        struct rte_flow_error *error = ctx->error;

        /* Register new sample resource. */
        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
        if (!resource) {
                rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot allocate resource memory");
                return NULL;
        }
        *resource = *ctx_resource;
        /* Create normal path table level. */
        if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                is_transfer = 1;
        else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
                is_egress = 1;
        tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
                                        is_egress, is_transfer,
                                        true, NULL, 0, 0, 0, error);
        if (!tbl) {
                rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "failed to create normal path table "
                                          "for sample");
                goto error;
        }
        resource->normal_path_tbl = tbl;
        if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
                if (!sh->default_miss_action) {
                        rte_flow_error_set(error, ENOMEM,
                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                                NULL,
                                                "default miss action was not "
                                                "created");
                        goto error;
                }
                sample_dv_actions[ctx_resource->sample_act.actions_num++] =
                                                sh->default_miss_action;
        }
        /* Create a DR sample action. */
        sampler_attr.sample_ratio = resource->ratio;
        sampler_attr.default_next_table = tbl->obj;
        sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
        sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
                                                        &sample_dv_actions[0];
        sampler_attr.action = resource->set_action;
        if (mlx5_os_flow_dr_create_flow_action_sampler
                        (&sampler_attr, &resource->verbs_action)) {
                rte_flow_error_set(error, ENOMEM,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                        NULL, "cannot create sample action");
                goto error;
        }
        resource->idx = idx;
        resource->dev = dev;
        return &resource->entry;
error:
        if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
                flow_dv_sample_sub_actions_release(dev,
                                                   &resource->sample_idx);
        if (resource->normal_path_tbl)
                flow_dv_tbl_resource_release(MLX5_SH(dev),
                                resource->normal_path_tbl);
        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
        return NULL;
}
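
/*
 * Layout note: the "normal path" table created above sits one level below
 * the sampled table (next_ft_id = ft_id + 1). Non-sampled traffic
 * continues there, while the sampled copy executes the sample actions,
 * plus the FDB default miss action when running in transfer mode.
 */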

struct mlx5_list_entry *
flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
                         struct mlx5_list_entry *entry __rte_unused,
                         void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct rte_eth_dev *dev = ctx->dev;
        struct mlx5_flow_dv_sample_resource *resource;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        uint32_t idx = 0;

        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
        if (!resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot allocate resource memory");
                return NULL;
        }
        memcpy(resource, entry, sizeof(*resource));
        resource->idx = idx;
        resource->dev = dev;
        return &resource->entry;
}

void
flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
                             struct mlx5_list_entry *entry)
{
        struct mlx5_flow_dv_sample_resource *resource =
                                  container_of(entry, typeof(*resource), entry);
        struct rte_eth_dev *dev = resource->dev;
        struct mlx5_priv *priv = dev->data->dev_private;

        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
}

/**
 * Find existing sample resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] ref
 *   Pointer to sample resource reference.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_sample_resource_register(struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_sample_resource *ref,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_flow_dv_sample_resource *resource;
        struct mlx5_list_entry *entry;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_cb_ctx ctx = {
                .dev = dev,
                .error = error,
                .data = ref,
        };

        entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
        if (!entry)
                return -rte_errno;
        resource = container_of(entry, typeof(*resource), entry);
        dev_flow->handle->dvh.rix_sample = resource->idx;
        dev_flow->dv.sample_res = resource;
        return 0;
}

int
flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
                            struct mlx5_list_entry *entry, void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
        struct rte_eth_dev *dev = ctx->dev;
        struct mlx5_flow_dv_dest_array_resource *resource =
                                  container_of(entry, typeof(*resource), entry);
        uint32_t idx = 0;

        if (ctx_resource->num_of_dest == resource->num_of_dest &&
            ctx_resource->ft_type == resource->ft_type &&
            !memcmp((void *)resource->sample_act,
                    (void *)ctx_resource->sample_act,
                   (ctx_resource->num_of_dest *
                   sizeof(struct mlx5_flow_sub_actions_list)))) {
                /*
                 * The existing destination array action is reused, so
                 * release the references taken for the prepared
                 * sub-actions.
                 */
                for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
                        flow_dv_sample_sub_actions_release(dev,
                                        &ctx_resource->sample_idx[idx]);
                return 0;
        }
        return 1;
}

struct mlx5_list_entry *
flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct rte_eth_dev *dev = ctx->dev;
        struct mlx5_flow_dv_dest_array_resource *resource;
        struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
        struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
        struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_flow_sub_actions_list *sample_act;
        struct mlx5dv_dr_domain *domain;
        uint32_t idx = 0, res_idx = 0;
        struct rte_flow_error *error = ctx->error;
        uint64_t action_flags;
        int ret;

        /* Register new destination array resource. */
        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
                                            &res_idx);
        if (!resource) {
                rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot allocate resource memory");
                return NULL;
        }
        *resource = *ctx_resource;
        if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;
        for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
                dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
                                 mlx5_malloc(MLX5_MEM_ZERO,
                                 sizeof(struct mlx5dv_dr_action_dest_attr),
                                 0, SOCKET_ID_ANY);
                if (!dest_attr[idx]) {
                        rte_flow_error_set(error, ENOMEM,
                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                           NULL,
                                           "cannot allocate resource memory");
                        goto error;
                }
                dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
                sample_act = &ctx_resource->sample_act[idx];
                action_flags = sample_act->action_flags;
                switch (action_flags) {
                case MLX5_FLOW_ACTION_QUEUE:
                        dest_attr[idx]->dest = sample_act->dr_queue_action;
                        break;
                case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
                        dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
                        dest_attr[idx]->dest_reformat = &dest_reformat[idx];
                        dest_attr[idx]->dest_reformat->reformat =
                                        sample_act->dr_encap_action;
                        dest_attr[idx]->dest_reformat->dest =
                                        sample_act->dr_port_id_action;
                        break;
                case MLX5_FLOW_ACTION_PORT_ID:
                        dest_attr[idx]->dest = sample_act->dr_port_id_action;
                        break;
                case MLX5_FLOW_ACTION_JUMP:
                        dest_attr[idx]->dest = sample_act->dr_jump_action;
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           NULL,
                                           "unsupported actions type");
                        goto error;
                }
        }
        /* Create a dest array action. */
        ret = mlx5_os_flow_dr_create_flow_action_dest_array
                                                (domain,
                                                 resource->num_of_dest,
                                                 dest_attr,
                                                 &resource->action);
        if (ret) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "cannot create destination array action");
                goto error;
        }
        resource->idx = res_idx;
        resource->dev = dev;
        for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
                mlx5_free(dest_attr[idx]);
        return &resource->entry;
error:
        for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
                flow_dv_sample_sub_actions_release(dev,
                                                   &resource->sample_idx[idx]);
                if (dest_attr[idx])
                        mlx5_free(dest_attr[idx]);
        }
        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
        return NULL;
}
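
/*
 * The mlx5dv_dr_action_dest_attr array built above is only an input to
 * mlx5_os_flow_dr_create_flow_action_dest_array(); once the DR action
 * exists the temporary attribute structures are freed again, on both the
 * success and the error path.
 */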

struct mlx5_list_entry *
flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
                            struct mlx5_list_entry *entry __rte_unused,
                            void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct rte_eth_dev *dev = ctx->dev;
        struct mlx5_flow_dv_dest_array_resource *resource;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        uint32_t res_idx = 0;
        struct rte_flow_error *error = ctx->error;

        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
                                      &res_idx);
        if (!resource) {
                rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot allocate dest-array memory");
                return NULL;
        }
        memcpy(resource, entry, sizeof(*resource));
        resource->idx = res_idx;
        resource->dev = dev;
        return &resource->entry;
}

void
flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
                                 struct mlx5_list_entry *entry)
{
        struct mlx5_flow_dv_dest_array_resource *resource =
                        container_of(entry, typeof(*resource), entry);
        struct rte_eth_dev *dev = resource->dev;
        struct mlx5_priv *priv = dev->data->dev_private;

        mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
}

/**
 * Find existing destination array resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] ref
 *   Pointer to destination array resource reference.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_dest_array_resource *ref,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_flow_dv_dest_array_resource *resource;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_list_entry *entry;
        struct mlx5_flow_cb_ctx ctx = {
                .dev = dev,
                .error = error,
                .data = ref,
        };

        entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
        if (!entry)
                return -rte_errno;
        resource = container_of(entry, typeof(*resource), entry);
        dev_flow->handle->dvh.rix_dest_array = resource->idx;
        dev_flow->dv.dest_array_res = resource;
        return 0;
}

/**
 * Convert Sample action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to sample action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in, out] num_of_dest
 *   Pointer to the num of destination.
 * @param[in, out] sample_actions
 *   Pointer to sample actions list.
 * @param[in, out] res
 *   Pointer to sample resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate_action_sample(struct rte_eth_dev *dev,
                                const struct rte_flow_action_sample *action,
                                struct mlx5_flow *dev_flow,
                                const struct rte_flow_attr *attr,
                                uint32_t *num_of_dest,
                                void **sample_actions,
                                struct mlx5_flow_dv_sample_resource *res,
                                struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action *sub_actions;
        struct mlx5_flow_sub_actions_list *sample_act;
        struct mlx5_flow_sub_actions_idx *sample_idx;
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
        struct rte_flow *flow = dev_flow->flow;
        struct mlx5_flow_rss_desc *rss_desc;
        uint64_t action_flags = 0;

        MLX5_ASSERT(wks);
        rss_desc = &wks->rss_desc;
        sample_act = &res->sample_act;
        sample_idx = &res->sample_idx;
        res->ratio = action->ratio;
        sub_actions = action->actions;
        for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
                int type = sub_actions->type;
                uint32_t pre_rix = 0;
                void *pre_r;
                switch (type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                {
                        const struct rte_flow_action_queue *queue;
                        struct mlx5_hrxq *hrxq;
                        uint32_t hrxq_idx;

                        queue = sub_actions->conf;
                        rss_desc->queue_num = 1;
                        rss_desc->queue[0] = queue->index;
                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
                                                    rss_desc, &hrxq_idx);
                        if (!hrxq)
                                return rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_ACTION,
                                         NULL,
                                         "cannot create fate queue");
                        sample_act->dr_queue_action = hrxq->action;
                        sample_idx->rix_hrxq = hrxq_idx;
                        sample_actions[sample_act->actions_num++] =
                                                hrxq->action;
                        (*num_of_dest)++;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        if (action_flags & MLX5_FLOW_ACTION_MARK)
                                dev_flow->handle->rix_hrxq = hrxq_idx;
                        dev_flow->handle->fate_action =
                                        MLX5_FLOW_FATE_QUEUE;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_RSS:
                {
                        struct mlx5_hrxq *hrxq;
                        uint32_t hrxq_idx;
                        const struct rte_flow_action_rss *rss;
                        const uint8_t *rss_key;

                        rss = sub_actions->conf;
                        memcpy(rss_desc->queue, rss->queue,
                               rss->queue_num * sizeof(uint16_t));
                        rss_desc->queue_num = rss->queue_num;
                        /* NULL RSS key indicates default RSS key. */
                        rss_key = !rss->key ? rss_hash_default_key : rss->key;
                        memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
                        /*
                         * rss->level and rss->types should be set in advance
                         * when expanding items for RSS.
                         */
                        flow_dv_hashfields_set(dev_flow, rss_desc);
                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
                                                    rss_desc, &hrxq_idx);
                        if (!hrxq)
                                return rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_ACTION,
                                         NULL,
                                         "cannot create fate queue");
                        sample_act->dr_queue_action = hrxq->action;
                        sample_idx->rix_hrxq = hrxq_idx;
                        sample_actions[sample_act->actions_num++] =
                                                hrxq->action;
                        (*num_of_dest)++;
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        if (action_flags & MLX5_FLOW_ACTION_MARK)
                                dev_flow->handle->rix_hrxq = hrxq_idx;
                        dev_flow->handle->fate_action =
                                        MLX5_FLOW_FATE_QUEUE;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK:
                {
                        uint32_t tag_be = mlx5_flow_mark_set
                                (((const struct rte_flow_action_mark *)
                                (sub_actions->conf))->id);

                        dev_flow->handle->mark = 1;
                        pre_rix = dev_flow->handle->dvh.rix_tag;
                        /* Save the mark resource before sample. */
                        pre_r = dev_flow->dv.tag_resource;
                        if (flow_dv_tag_resource_register(dev, tag_be,
                                                  dev_flow, error))
                                return -rte_errno;
                        MLX5_ASSERT(dev_flow->dv.tag_resource);
                        sample_act->dr_tag_action =
                                dev_flow->dv.tag_resource->action;
                        sample_idx->rix_tag =
                                dev_flow->handle->dvh.rix_tag;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_tag_action;
                        /* Recover the mark resource after sample. */
                        dev_flow->dv.tag_resource = pre_r;
                        dev_flow->handle->dvh.rix_tag = pre_rix;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_COUNT:
                {
                        if (!flow->counter) {
                                flow->counter =
                                        flow_dv_translate_create_counter(dev,
                                                dev_flow, sub_actions->conf,
                                                0);
                                if (!flow->counter)
                                        return rte_flow_error_set
                                                (error, rte_errno,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                NULL,
                                                "cannot create counter"
                                                " object.");
                        }
                        sample_act->dr_cnt_action =
                                  (flow_dv_counter_get_by_idx(dev,
                                  flow->counter, NULL))->action;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_cnt_action;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_PORT_ID:
                case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
                {
                        struct mlx5_flow_dv_port_id_action_resource
                                        port_id_resource;
                        uint32_t port_id = 0;

                        memset(&port_id_resource, 0, sizeof(port_id_resource));
                        /* Save the port id resource before sample. */
                        pre_rix = dev_flow->handle->rix_port_id_action;
                        pre_r = dev_flow->dv.port_id_action;
                        if (flow_dv_translate_action_port_id(dev, sub_actions,
                                                             &port_id, error))
                                return -rte_errno;
                        port_id_resource.port_id = port_id;
                        if (flow_dv_port_id_action_resource_register
                            (dev, &port_id_resource, dev_flow, error))
                                return -rte_errno;
                        sample_act->dr_port_id_action =
                                dev_flow->dv.port_id_action->action;
                        sample_idx->rix_port_id_action =
                                dev_flow->handle->rix_port_id_action;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_port_id_action;
                        /* Recover the port id resource after sample. */
                        dev_flow->dv.port_id_action = pre_r;
                        dev_flow->handle->rix_port_id_action = pre_rix;
                        (*num_of_dest)++;
                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        /* Save the encap resource before sample. */
                        pre_rix = dev_flow->handle->dvh.rix_encap_decap;
                        pre_r = dev_flow->dv.encap_decap;
                        if (flow_dv_create_action_l2_encap(dev, sub_actions,
                                                           dev_flow,
                                                           attr->transfer,
                                                           error))
                                return -rte_errno;
                        sample_act->dr_encap_action =
                                dev_flow->dv.encap_decap->action;
                        sample_idx->rix_encap_decap =
                                dev_flow->handle->dvh.rix_encap_decap;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_encap_action;
                        /* Recover the encap resource after sample. */
                        dev_flow->dv.encap_decap = pre_r;
                        dev_flow->handle->dvh.rix_encap_decap = pre_rix;
                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
                        break;
                default:
                        return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                NULL,
                                "unsupported action in sample action list");
                }
        }
        sample_act->action_flags = action_flags;
        res->ft_id = dev_flow->dv.group;
        if (attr->transfer) {
                union {
                        uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
                        uint64_t set_action;
                } action_ctx = { .set_action = 0 };

                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
                MLX5_SET(set_action_in, action_ctx.action_in, action_type,
                         MLX5_MODIFICATION_TYPE_SET);
                MLX5_SET(set_action_in, action_ctx.action_in, field,
                         MLX5_MODI_META_REG_C_0);
                MLX5_SET(set_action_in, action_ctx.action_in, data,
                         priv->vport_meta_tag);
                res->set_action = action_ctx.set_action;
        } else if (attr->ingress) {
                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
        } else {
                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
        }
        return 0;
}
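
/*
 * Transfer-mode note for the translation above: the set_action attached
 * to the sampler writes the source vport metadata (priv->vport_meta_tag)
 * into REG_C_0, so the sampled packet copy keeps its source-port
 * attribution after mirroring in the FDB domain.
 */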

/**
 * Create the sample action and, for mirroring, the destination array.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] num_of_dest
 *   The num of destination.
 * @param[in, out] res
 *   Pointer to sample resource.
 * @param[in, out] mdest_res
 *   Pointer to destination array resource.
 * @param[in] sample_actions
 *   Pointer to sample path actions list.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
                             struct mlx5_flow *dev_flow,
                             uint32_t num_of_dest,
                             struct mlx5_flow_dv_sample_resource *res,
                             struct mlx5_flow_dv_dest_array_resource *mdest_res,
                             void **sample_actions,
                             uint64_t action_flags,
                             struct rte_flow_error *error)
{
        /* Update normal path action resource into the last index of array. */
        uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
        struct mlx5_flow_sub_actions_list *sample_act =
                                        &mdest_res->sample_act[dest_index];
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
        struct mlx5_flow_rss_desc *rss_desc;
        uint32_t normal_idx = 0;
        struct mlx5_hrxq *hrxq;
        uint32_t hrxq_idx;

        MLX5_ASSERT(wks);
        rss_desc = &wks->rss_desc;
        if (num_of_dest > 1) {
                if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
                        /* Handle QP action for mirroring. */
                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
                                                    rss_desc, &hrxq_idx);
                        if (!hrxq)
                                return rte_flow_error_set
                                     (error, rte_errno,
                                      RTE_FLOW_ERROR_TYPE_ACTION,
                                      NULL,
                                      "cannot create rx queue");
                        normal_idx++;
                        mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
                        sample_act->dr_queue_action = hrxq->action;
                        if (action_flags & MLX5_FLOW_ACTION_MARK)
                                dev_flow->handle->rix_hrxq = hrxq_idx;
                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
                }
                if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
                        normal_idx++;
                        mdest_res->sample_idx[dest_index].rix_encap_decap =
                                dev_flow->handle->dvh.rix_encap_decap;
                        sample_act->dr_encap_action =
                                dev_flow->dv.encap_decap->action;
                        dev_flow->handle->dvh.rix_encap_decap = 0;
                }
                if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
                        normal_idx++;
                        mdest_res->sample_idx[dest_index].rix_port_id_action =
                                dev_flow->handle->rix_port_id_action;
                        sample_act->dr_port_id_action =
                                dev_flow->dv.port_id_action->action;
                        dev_flow->handle->rix_port_id_action = 0;
                }
                if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
                        normal_idx++;
                        mdest_res->sample_idx[dest_index].rix_jump =
                                dev_flow->handle->rix_jump;
                        sample_act->dr_jump_action =
                                dev_flow->dv.jump->action;
                        dev_flow->handle->rix_jump = 0;
                }
                sample_act->actions_num = normal_idx;
                /* Update sample action resource into the first index of array. */
                mdest_res->ft_type = res->ft_type;
                memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
                                sizeof(struct mlx5_flow_sub_actions_idx));
                memcpy(&mdest_res->sample_act[0], &res->sample_act,
                                sizeof(struct mlx5_flow_sub_actions_list));
                mdest_res->num_of_dest = num_of_dest;
                if (flow_dv_dest_array_resource_register(dev, mdest_res,
                                                         dev_flow, error))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "can't create sample "
                                                  "action");
        } else {
                res->sub_actions = sample_actions;
                if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
11835                                                   NULL,
11836                                                   "can't create sample action");
11837         }
11838         return 0;
11839 }
11840
11841 /**
11842  * Remove an ASO age action from age actions list.
11843  *
11844  * @param[in] dev
11845  *   Pointer to the Ethernet device structure.
11846  * @param[in] age
11847  *   Pointer to the aso age action handler.
11848  */
11849 static void
11850 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11851                                 struct mlx5_aso_age_action *age)
11852 {
11853         struct mlx5_age_info *age_info;
11854         struct mlx5_age_param *age_param = &age->age_params;
11855         struct mlx5_priv *priv = dev->data->dev_private;
11856         uint16_t expected = AGE_CANDIDATE;
11857
11858         age_info = GET_PORT_AGE_INFO(priv);
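              /*
               * Fast path: an action still in AGE_CANDIDATE state is not linked
               * in the aged-out list, so it can be flipped straight to AGE_FREE.
               */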
11859         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11860                                          AGE_FREE, false, __ATOMIC_RELAXED,
11861                                          __ATOMIC_RELAXED)) {
11862                 /*
11863                  * We need the lock even if it is an age timeout,
11864                  * since the age action may still be in process.
11865                  */
11866                 rte_spinlock_lock(&age_info->aged_sl);
11867                 LIST_REMOVE(age, next);
11868                 rte_spinlock_unlock(&age_info->aged_sl);
11869                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11870         }
11871 }
11872
11873 /**
11874  * Release an ASO age action.
11875  *
11876  * @param[in] dev
11877  *   Pointer to the Ethernet device structure.
11878  * @param[in] age_idx
11879  *   Index of ASO age action to release.
11883  *
11884  * @return
11885  *   0 when age action was removed, otherwise the number of references.
11886  */
11887 static int
11888 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11889 {
11890         struct mlx5_priv *priv = dev->data->dev_private;
11891         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11892         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11893         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11894
11895         if (!ret) {
11896                 flow_dv_aso_age_remove_from_age(dev, age);
11897                 rte_spinlock_lock(&mng->free_sl);
11898                 LIST_INSERT_HEAD(&mng->free, age, next);
11899                 rte_spinlock_unlock(&mng->free_sl);
11900         }
11901         return ret;
11902 }
11903
11904 /**
11905  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11906  *
11907  * @param[in] dev
11908  *   Pointer to the Ethernet device structure.
11909  *
11910  * @return
11911  *   0 on success, otherwise negative errno value and rte_errno is set.
11912  */
11913 static int
11914 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11915 {
11916         struct mlx5_priv *priv = dev->data->dev_private;
11917         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11918         void *old_pools = mng->pools;
11919         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11920         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11921         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11922
11923         if (!pools) {
11924                 rte_errno = ENOMEM;
11925                 return -ENOMEM;
11926         }
11927         if (old_pools) {
11928                 memcpy(pools, old_pools,
11929                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11930                 mlx5_free(old_pools);
11931         } else {
11932                 /* First ASO flow hit allocation - starting ASO data-path. */
11933                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11934
11935                 if (ret) {
11936                         mlx5_free(pools);
11937                         return ret;
11938                 }
11939         }
11940         mng->n = resize;
11941         mng->pools = pools;
11942         return 0;
11943 }
11944
11945 /**
11946  * Create and initialize a new ASO aging pool.
11947  *
11948  * @param[in] dev
11949  *   Pointer to the Ethernet device structure.
11950  * @param[out] age_free
11951  *   Where to put the pointer of a new age action.
11952  *
11953  * @return
11954  *   The age actions pool pointer and @p age_free is set on success,
11955  *   NULL otherwise and rte_errno is set.
11956  */
11957 static struct mlx5_aso_age_pool *
11958 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11959                         struct mlx5_aso_age_action **age_free)
11960 {
11961         struct mlx5_priv *priv = dev->data->dev_private;
11962         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11963         struct mlx5_aso_age_pool *pool = NULL;
11964         struct mlx5_devx_obj *obj = NULL;
11965         uint32_t i;
11966
11967         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
11968                                                     priv->sh->cdev->pdn);
11969         if (!obj) {
11970                 rte_errno = ENODATA;
11971                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11972                 return NULL;
11973         }
11974         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11975         if (!pool) {
11976                 claim_zero(mlx5_devx_cmd_destroy(obj));
11977                 rte_errno = ENOMEM;
11978                 return NULL;
11979         }
11980         pool->flow_hit_aso_obj = obj;
11981         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11982         rte_rwlock_write_lock(&mng->resize_rwl);
11983         pool->index = mng->next;
11984         /* Resize pools array if there is no room for the new pool in it. */
11985         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11986                 claim_zero(mlx5_devx_cmd_destroy(obj));
11987                 mlx5_free(pool);
11988                 rte_rwlock_write_unlock(&mng->resize_rwl);
11989                 return NULL;
11990         }
11991         mng->pools[pool->index] = pool;
11992         mng->next++;
11993         rte_rwlock_write_unlock(&mng->resize_rwl);
11994         /* Assign the first action in the new pool, the rest go to the free list. */
11995         *age_free = &pool->actions[0];
11996         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11997                 pool->actions[i].offset = i;
11998                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11999         }
12000         return pool;
12001 }
12002
12003 /**
12004  * Allocate an ASO aging bit.
12005  *
12006  * @param[in] dev
12007  *   Pointer to the Ethernet device structure.
12008  * @param[out] error
12009  *   Pointer to the error structure.
12010  *
12011  * @return
12012  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
12013  */
12014 static uint32_t
12015 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12016 {
12017         struct mlx5_priv *priv = dev->data->dev_private;
12018         const struct mlx5_aso_age_pool *pool;
12019         struct mlx5_aso_age_action *age_free = NULL;
12020         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
12021
12022         MLX5_ASSERT(mng);
12023         /* Try to get the next free age action bit. */
12024         rte_spinlock_lock(&mng->free_sl);
12025         age_free = LIST_FIRST(&mng->free);
12026         if (age_free) {
12027                 LIST_REMOVE(age_free, next);
12028         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
12029                 rte_spinlock_unlock(&mng->free_sl);
12030                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12031                                    NULL, "failed to create ASO age pool");
12032                 return 0; /* 0 is an error. */
12033         }
12034         rte_spinlock_unlock(&mng->free_sl);
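              /*
               * Recover the owning pool: the action's offset is its index in
               * pool->actions[], so (age_free - offset) points at the start of
               * that array.
               */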
12035         pool = container_of
12036           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
12037                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
12038                                                                        actions);
12039         if (!age_free->dr_action) {
12040                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
12041                                                  error);
12042
12043                 if (reg_c < 0) {
12044                         rte_flow_error_set(error, rte_errno,
12045                                            RTE_FLOW_ERROR_TYPE_ACTION,
12046                                            NULL, "failed to get reg_c "
12047                                            "for ASO flow hit");
12048                         return 0; /* 0 is an error. */
12049                 }
12050 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
12051                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
12052                                 (priv->sh->rx_domain,
12053                                  pool->flow_hit_aso_obj->obj, age_free->offset,
12054                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
12055                                  (reg_c - REG_C_0));
12056 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
12057                 if (!age_free->dr_action) {
12058                         rte_errno = errno;
12059                         rte_spinlock_lock(&mng->free_sl);
12060                         LIST_INSERT_HEAD(&mng->free, age_free, next);
12061                         rte_spinlock_unlock(&mng->free_sl);
12062                         rte_flow_error_set(error, rte_errno,
12063                                            RTE_FLOW_ERROR_TYPE_ACTION,
12064                                            NULL, "failed to create ASO "
12065                                            "flow hit action");
12066                         return 0; /* 0 is an error. */
12067                 }
12068         }
12069         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
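              /*
               * Encode the returned index: pool index in the lower 16 bits and
               * (offset + 1) in the upper bits, so a valid action never encodes
               * to 0, which is reserved as the error value.
               */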
12070         return pool->index | ((age_free->offset + 1) << 16);
12071 }
12072
12073 /**
12074  * Initialize flow ASO age parameters.
12075  *
12076  * @param[in] dev
12077  *   Pointer to rte_eth_dev structure.
12078  * @param[in] age_idx
12079  *   Index of ASO age action.
12080  * @param[in] context
12081  *   Pointer to flow counter age context.
12082  * @param[in] timeout
12083  *   Aging timeout in seconds.
12085  */
12086 static void
12087 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
12088                             uint32_t age_idx,
12089                             void *context,
12090                             uint32_t timeout)
12091 {
12092         struct mlx5_aso_age_action *aso_age;
12093
12094         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
12095         MLX5_ASSERT(aso_age);
12096         aso_age->age_params.context = context;
12097         aso_age->age_params.timeout = timeout;
12098         aso_age->age_params.port_id = dev->data->port_id;
12099         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
12100                          __ATOMIC_RELAXED);
12101         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
12102                          __ATOMIC_RELAXED);
12103 }
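
      /*
       * A minimal application-side sketch of how an aging action reaches this
       * code (illustrative only, not part of the driver; port_id and the
       * 10-second timeout are placeholders):
       *
       *	struct rte_flow_action_age age_conf = {
       *		.timeout = 10,
       *		.context = NULL,
       *	};
       *	struct rte_flow_action actions[] = {
       *		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
       *		{ .type = RTE_FLOW_ACTION_TYPE_END },
       *	};
       *
       * Creating a flow with such an action makes the translation path below
       * allocate an ASO age action and initialize its parameters via
       * flow_dv_aso_age_params_init().
       */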
12104
12105 static void
12106 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
12107                                const struct rte_flow_item_integrity *value,
12108                                void *headers_m, void *headers_v)
12109 {
12110         if (mask->l4_ok) {
12111                 /* The application l4_ok filter aggregates all hardware
12112                  * l4 filters, so hw l4_checksum_ok is implicitly added here.
12113                  */
12114                 struct rte_flow_item_integrity local_item;
12115
12116                 local_item.l4_csum_ok = 1;
12117                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
12118                          local_item.l4_csum_ok);
12119                 if (value->l4_ok) {
12120                         /* An application l4_ok = 1 match sets both hw flags,
12121                          * l4_ok and l4_checksum_ok, to 1.
12122                          */
12123                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12124                                  l4_checksum_ok, local_item.l4_csum_ok);
12125                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
12126                                  mask->l4_ok);
12127                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
12128                                  value->l4_ok);
12129                 } else {
12130                         /* application l4_ok = 0 matches on hw flag
12131                          * l4_checksum_ok = 0 only.
12132                          */
12133                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12134                                  l4_checksum_ok, 0);
12135                 }
12136         } else if (mask->l4_csum_ok) {
12137                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
12138                          mask->l4_csum_ok);
12139                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12140                          value->l4_csum_ok);
12141         }
12142 }
12143
12144 static void
12145 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12146                                const struct rte_flow_item_integrity *value,
12147                                void *headers_m, void *headers_v, bool is_ipv4)
12148 {
12149         if (mask->l3_ok) {
12150                 /* The application l3_ok filter aggregates all hardware
12151                  * l3 filters, so hw ipv4_checksum_ok is implicitly added here.
12152                  */
12153                 struct rte_flow_item_integrity local_item;
12154
12155                 local_item.ipv4_csum_ok = !!is_ipv4;
12156                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12157                          local_item.ipv4_csum_ok);
12158                 if (value->l3_ok) {
12159                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12160                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
12161                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
12162                                  mask->l3_ok);
12163                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12164                                  value->l3_ok);
12165                 } else {
12166                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12167                                  ipv4_checksum_ok, 0);
12168                 }
12169         } else if (mask->ipv4_csum_ok) {
12170                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12171                          mask->ipv4_csum_ok);
12172                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12173                          value->ipv4_csum_ok);
12174         }
12175 }
12176
12177 static void
12178 set_integrity_bits(void *headers_m, void *headers_v,
12179                    const struct rte_flow_item *integrity_item, bool is_l3_ip4)
12180 {
12181         const struct rte_flow_item_integrity *spec = integrity_item->spec;
12182         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12183
12184         /* Integrity bits validation guarantees a non-NULL spec pointer. */
12185         MLX5_ASSERT(spec != NULL);
12186         if (!mask)
12187                 mask = &rte_flow_item_integrity_mask;
12188         flow_dv_translate_integrity_l3(mask, spec, headers_m, headers_v,
12189                                        is_l3_ip4);
12190         flow_dv_translate_integrity_l4(mask, spec, headers_m, headers_v);
12191 }
12192
12193 static void
12194 flow_dv_translate_item_integrity_post(void *matcher, void *key,
12195                                       const
12196                                       struct rte_flow_item *integrity_items[2],
12197                                       uint64_t pattern_flags)
12198 {
12199         void *headers_m, *headers_v;
12200         bool is_l3_ip4;
12201
12202         if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) {
12203                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12204                                          inner_headers);
12205                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12206                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) !=
12207                             0;
12208                 set_integrity_bits(headers_m, headers_v,
12209                                    integrity_items[1], is_l3_ip4);
12210         }
12211         if (pattern_flags & MLX5_FLOW_ITEM_OUTER_INTEGRITY) {
12212                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12213                                          outer_headers);
12214                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12215                 is_l3_ip4 = (pattern_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) !=
12216                             0;
12217                 set_integrity_bits(headers_m, headers_v,
12218                                    integrity_items[0], is_l3_ip4);
12219         }
12220 }
12221
12222 static void
12223 flow_dv_translate_item_integrity(const struct rte_flow_item *item,
12224                                  const struct rte_flow_item *integrity_items[2],
12225                                  uint64_t *last_item)
12226 {
12227         const struct rte_flow_item_integrity *spec = (typeof(spec))item->spec;
12228
12229         /* Integrity bits validation guarantees a non-NULL spec pointer. */
12230         MLX5_ASSERT(spec != NULL);
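              /* Levels 0 and 1 refer to the outermost headers, 2 and above to inner. */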
12231         if (spec->level > 1) {
12232                 integrity_items[1] = item;
12233                 *last_item |= MLX5_FLOW_ITEM_INNER_INTEGRITY;
12234         } else {
12235                 integrity_items[0] = item;
12236                 *last_item |= MLX5_FLOW_ITEM_OUTER_INTEGRITY;
12237         }
12238 }
12239
12240 /**
12241  * Prepares DV flow counter with aging configuration.
12242  * Gets it by index when it exists, creates a new one when it doesn't.
12243  *
12244  * @param[in] dev
12245  *   Pointer to rte_eth_dev structure.
12246  * @param[in] dev_flow
12247  *   Pointer to the mlx5_flow.
12248  * @param[in, out] flow
12249  *   Pointer to the parent rte_flow.
12250  * @param[in] count
12251  *   Pointer to the counter action configuration.
12252  * @param[in] age
12253  *   Pointer to the aging action configuration.
12254  * @param[out] error
12255  *   Pointer to the error structure.
12256  *
12257  * @return
12258  *   Pointer to the counter, NULL otherwise.
12259  */
12260 static struct mlx5_flow_counter *
12261 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12262                         struct mlx5_flow *dev_flow,
12263                         struct rte_flow *flow,
12264                         const struct rte_flow_action_count *count,
12265                         const struct rte_flow_action_age *age,
12266                         struct rte_flow_error *error)
12267 {
12268         if (!flow->counter) {
12269                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12270                                                                  count, age);
12271                 if (!flow->counter) {
12272                         rte_flow_error_set(error, rte_errno,
12273                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12274                                            "cannot create counter object.");
12275                         return NULL;
12276                 }
12277         }
12278         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12279 }
12280
12281 /**
12282  * Release an ASO CT action by its own device.
12283  *
12284  * @param[in] dev
12285  *   Pointer to the Ethernet device structure.
12286  * @param[in] idx
12287  *   Index of ASO CT action to release.
12288  *
12289  * @return
12290  *   0 when CT action was removed, otherwise the number of references.
12291  */
12292 static inline int
12293 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12294 {
12295         struct mlx5_priv *priv = dev->data->dev_private;
12296         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12297         uint32_t ret;
12298         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12299         enum mlx5_aso_ct_state state =
12300                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12301
12302         /* Cannot release when CT is in the ASO SQ. */
12303         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12304                 return -1;
12305         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12306         if (!ret) {
12307                 if (ct->dr_action_orig) {
12308 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12309                         claim_zero(mlx5_glue->destroy_flow_action
12310                                         (ct->dr_action_orig));
12311 #endif
12312                         ct->dr_action_orig = NULL;
12313                 }
12314                 if (ct->dr_action_rply) {
12315 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12316                         claim_zero(mlx5_glue->destroy_flow_action
12317                                         (ct->dr_action_rply));
12318 #endif
12319                         ct->dr_action_rply = NULL;
12320                 }
12321                 /* Clear the state to free; not needed on the 1st allocation. */
12322                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12323                 rte_spinlock_lock(&mng->ct_sl);
12324                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12325                 rte_spinlock_unlock(&mng->ct_sl);
12326         }
12327         return (int)ret;
12328 }
12329
12330 static inline int
12331 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
12332                        struct rte_flow_error *error)
12333 {
12334         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12335         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12336         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12337         int ret;
12338
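              /*
               * The indirect CT index embeds the owner port: a CT action may be
               * shared across ports, so the release must go through the device
               * that owns it.
               */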
12339         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12340         if (dev->data->dev_started != 1)
12341                 return rte_flow_error_set(error, EAGAIN,
12342                                           RTE_FLOW_ERROR_TYPE_ACTION,
12343                                           NULL,
12344                                           "Indirect CT action cannot be destroyed when the port is stopped");
12345         ret = flow_dv_aso_ct_dev_release(owndev, idx);
12346         if (ret < 0)
12347                 return rte_flow_error_set(error, EAGAIN,
12348                                           RTE_FLOW_ERROR_TYPE_ACTION,
12349                                           NULL,
12350                                           "Current state prevents indirect CT action from being destroyed");
12351         return ret;
12352 }
12353
12354 /**
12355  * Resize the ASO CT pools array by 64 pools.
12356  *
12357  * @param[in] dev
12358  *   Pointer to the Ethernet device structure.
12359  *
12360  * @return
12361  *   0 on success, otherwise negative errno value and rte_errno is set.
12362  */
12363 static int
12364 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12365 {
12366         struct mlx5_priv *priv = dev->data->dev_private;
12367         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12368         void *old_pools = mng->pools;
12369         /* Magic number for now; a dedicated macro is needed. */
12370         uint32_t resize = mng->n + 64;
12371         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12372         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12373
12374         if (!pools) {
12375                 rte_errno = ENOMEM;
12376                 return -rte_errno;
12377         }
12378         rte_rwlock_write_lock(&mng->resize_rwl);
12379         /* The ASO SQ/QP was already initialized at startup. */
12380         if (old_pools) {
12381                 /* Realloc could be an alternative choice. */
12382                 rte_memcpy(pools, old_pools,
12383                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12384                 mlx5_free(old_pools);
12385         }
12386         mng->n = resize;
12387         mng->pools = pools;
12388         rte_rwlock_write_unlock(&mng->resize_rwl);
12389         return 0;
12390 }
12391
12392 /**
12393  * Create and initialize a new ASO CT pool.
12394  *
12395  * @param[in] dev
12396  *   Pointer to the Ethernet device structure.
12397  * @param[out] ct_free
12398  *   Where to put the pointer of a new CT action.
12399  *
12400  * @return
12401  *   The CT actions pool pointer and @p ct_free is set on success,
12402  *   NULL otherwise and rte_errno is set.
12403  */
12404 static struct mlx5_aso_ct_pool *
12405 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12406                        struct mlx5_aso_ct_action **ct_free)
12407 {
12408         struct mlx5_priv *priv = dev->data->dev_private;
12409         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12410         struct mlx5_aso_ct_pool *pool = NULL;
12411         struct mlx5_devx_obj *obj = NULL;
12412         uint32_t i;
12413         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12414
12415         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
12416                                                           priv->sh->cdev->pdn,
12417                                                           log_obj_size);
12418         if (!obj) {
12419                 rte_errno = ENODATA;
12420                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12421                 return NULL;
12422         }
12423         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12424         if (!pool) {
12425                 rte_errno = ENOMEM;
12426                 claim_zero(mlx5_devx_cmd_destroy(obj));
12427                 return NULL;
12428         }
12429         pool->devx_obj = obj;
12430         pool->index = mng->next;
12431         /* Resize pools array if there is no room for the new pool in it. */
12432         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12433                 claim_zero(mlx5_devx_cmd_destroy(obj));
12434                 mlx5_free(pool);
12435                 return NULL;
12436         }
12437         mng->pools[pool->index] = pool;
12438         mng->next++;
12439         /* Assign the first action in the new pool, the rest go to the free list. */
12440         *ct_free = &pool->actions[0];
12441         /* The caller holds the lock, so the list operation is safe here. */
12442         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12443                 /* refcnt is 0 when allocating the memory. */
12444                 pool->actions[i].offset = i;
12445                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12446         }
12447         return pool;
12448 }
12449
12450 /**
12451  * Allocate an ASO CT action from the free list.
12452  *
12453  * @param[in] dev
12454  *   Pointer to the Ethernet device structure.
12455  * @param[out] error
12456  *   Pointer to the error structure.
12457  *
12458  * @return
12459  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12460  */
12461 static uint32_t
12462 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12463 {
12464         struct mlx5_priv *priv = dev->data->dev_private;
12465         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12466         struct mlx5_aso_ct_action *ct = NULL;
12467         struct mlx5_aso_ct_pool *pool;
12468         uint8_t reg_c;
12469         uint32_t ct_idx;
12470
12471         MLX5_ASSERT(mng);
12472         if (!priv->sh->devx) {
12473                 rte_errno = ENOTSUP;
12474                 return 0;
12475         }
12476         /* Get a free CT action; if none, a new pool will be created. */
12477         rte_spinlock_lock(&mng->ct_sl);
12478         ct = LIST_FIRST(&mng->free_cts);
12479         if (ct) {
12480                 LIST_REMOVE(ct, next);
12481         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12482                 rte_spinlock_unlock(&mng->ct_sl);
12483                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12484                                    NULL, "failed to create ASO CT pool");
12485                 return 0;
12486         }
12487         rte_spinlock_unlock(&mng->ct_sl);
12488         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12489         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12490         /* 0: inactive, 1: created, 2+: used by flows. */
12491         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
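              /*
               * Each CT action exposes two DR actions, one per connection
               * direction (initiator and responder); they are created once
               * and cached on the action for reuse.
               */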
12492         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
12493         if (!ct->dr_action_orig) {
12494 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12495                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12496                         (priv->sh->rx_domain, pool->devx_obj->obj,
12497                          ct->offset,
12498                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12499                          reg_c - REG_C_0);
12500 #else
12501                 RTE_SET_USED(reg_c);
12502 #endif
12503                 if (!ct->dr_action_orig) {
12504                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12505                         rte_flow_error_set(error, rte_errno,
12506                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12507                                            "failed to create ASO CT action");
12508                         return 0;
12509                 }
12510         }
12511         if (!ct->dr_action_rply) {
12512 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12513                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12514                         (priv->sh->rx_domain, pool->devx_obj->obj,
12515                          ct->offset,
12516                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12517                          reg_c - REG_C_0);
12518 #endif
12519                 if (!ct->dr_action_rply) {
12520                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12521                         rte_flow_error_set(error, rte_errno,
12522                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12523                                            "failed to create ASO CT action");
12524                         return 0;
12525                 }
12526         }
12527         return ct_idx;
12528 }
12529
12530 /**
12531  * Create a conntrack object with context and actions using the ASO mechanism.
12532  *
12533  * @param[in] dev
12534  *   Pointer to rte_eth_dev structure.
12535  * @param[in] pro
12536  *   Pointer to conntrack information profile.
12537  * @param[out] error
12538  *   Pointer to the error structure.
12539  *
12540  * @return
12541  *   Index to conntrack object on success, 0 otherwise.
12542  */
12543 static uint32_t
12544 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12545                                    const struct rte_flow_action_conntrack *pro,
12546                                    struct rte_flow_error *error)
12547 {
12548         struct mlx5_priv *priv = dev->data->dev_private;
12549         struct mlx5_dev_ctx_shared *sh = priv->sh;
12550         struct mlx5_aso_ct_action *ct;
12551         uint32_t idx;
12552
12553         if (!sh->ct_aso_en)
12554                 return rte_flow_error_set(error, ENOTSUP,
12555                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12556                                           "Connection tracking is not supported");
12557         idx = flow_dv_aso_ct_alloc(dev, error);
12558         if (!idx)
12559                 return rte_flow_error_set(error, rte_errno,
12560                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12561                                           "Failed to allocate CT object");
12562         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12563         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12564                 return rte_flow_error_set(error, EBUSY,
12565                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12566                                           "Failed to update CT");
12567         ct->is_original = !!pro->is_original_dir;
12568         ct->peer = pro->peer_port;
12569         return idx;
12570 }
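
      /*
       * A minimal application-side sketch (illustrative only, not part of the
       * driver; port_id and peer_port_id are placeholders): a conntrack object
       * is usually created through the indirect action API, which lands in
       * flow_dv_translate_create_conntrack():
       *
       *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
       *	struct rte_flow_action_conntrack profile = {
       *		.peer_port = peer_port_id,
       *		.is_original_dir = 1,
       *	};
       *	struct rte_flow_action action = {
       *		.type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
       *		.conf = &profile,
       *	};
       *	struct rte_flow_error err;
       *	struct rte_flow_action_handle *handle =
       *		rte_flow_action_handle_create(port_id, &conf, &action, &err);
       *
       * The TCP window and liveness parameters of the profile are omitted
       * here for brevity.
       */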
12571
12572 /**
12573  * Fill the flow with DV spec, lock free
12574  * (mutex should be acquired by caller).
12575  *
12576  * @param[in] dev
12577  *   Pointer to rte_eth_dev structure.
12578  * @param[in, out] dev_flow
12579  *   Pointer to the sub flow.
12580  * @param[in] attr
12581  *   Pointer to the flow attributes.
12582  * @param[in] items
12583  *   Pointer to the list of items.
12584  * @param[in] actions
12585  *   Pointer to the list of actions.
12586  * @param[out] error
12587  *   Pointer to the error structure.
12588  *
12589  * @return
12590  *   0 on success, a negative errno value otherwise and rte_errno is set.
12591  */
12592 static int
12593 flow_dv_translate(struct rte_eth_dev *dev,
12594                   struct mlx5_flow *dev_flow,
12595                   const struct rte_flow_attr *attr,
12596                   const struct rte_flow_item items[],
12597                   const struct rte_flow_action actions[],
12598                   struct rte_flow_error *error)
12599 {
12600         struct mlx5_priv *priv = dev->data->dev_private;
12601         struct mlx5_dev_config *dev_conf = &priv->config;
12602         struct rte_flow *flow = dev_flow->flow;
12603         struct mlx5_flow_handle *handle = dev_flow->handle;
12604         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12605         struct mlx5_flow_rss_desc *rss_desc;
12606         uint64_t item_flags = 0;
12607         uint64_t last_item = 0;
12608         uint64_t action_flags = 0;
12609         struct mlx5_flow_dv_matcher matcher = {
12610                 .mask = {
12611                         .size = sizeof(matcher.mask.buf),
12612                 },
12613         };
12614         int actions_n = 0;
12615         bool actions_end = false;
12616         union {
12617                 struct mlx5_flow_dv_modify_hdr_resource res;
12618                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12619                             sizeof(struct mlx5_modification_cmd) *
12620                             (MLX5_MAX_MODIFY_NUM + 1)];
12621         } mhdr_dummy;
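              /*
               * The union above reserves stack room for the modify-header
               * resource plus up to MLX5_MAX_MODIFY_NUM + 1 modification
               * commands that are appended right after it.
               */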
12622         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12623         const struct rte_flow_action_count *count = NULL;
12624         const struct rte_flow_action_age *non_shared_age = NULL;
12625         union flow_dv_attr flow_attr = { .attr = 0 };
12626         uint32_t tag_be;
12627         union mlx5_flow_tbl_key tbl_key;
12628         uint32_t modify_action_position = UINT32_MAX;
12629         void *match_mask = matcher.mask.buf;
12630         void *match_value = dev_flow->dv.value.buf;
12631         uint8_t next_protocol = 0xff;
12632         struct rte_vlan_hdr vlan = { 0 };
12633         struct mlx5_flow_dv_dest_array_resource mdest_res;
12634         struct mlx5_flow_dv_sample_resource sample_res;
12635         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12636         const struct rte_flow_action_sample *sample = NULL;
12637         struct mlx5_flow_sub_actions_list *sample_act;
12638         uint32_t sample_act_pos = UINT32_MAX;
12639         uint32_t age_act_pos = UINT32_MAX;
12640         uint32_t num_of_dest = 0;
12641         int tmp_actions_n = 0;
12642         uint32_t table;
12643         int ret = 0;
12644         const struct mlx5_flow_tunnel *tunnel = NULL;
12645         struct flow_grp_info grp_info = {
12646                 .external = !!dev_flow->external,
12647                 .transfer = !!attr->transfer,
12648                 .fdb_def_rule = !!priv->fdb_def_rule,
12649                 .skip_scale = dev_flow->skip_scale &
12650                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12651                 .std_tbl_fix = true,
12652         };
12653         const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
12654
12655         if (!wks)
12656                 return rte_flow_error_set(error, ENOMEM,
12657                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12658                                           NULL,
12659                                           "failed to push flow workspace");
12660         rss_desc = &wks->rss_desc;
12661         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12662         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12663         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12664                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12665         /* update normal path action resource into last index of array */
12666         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12667         if (is_tunnel_offload_active(dev)) {
12668                 if (dev_flow->tunnel) {
12669                         RTE_VERIFY(dev_flow->tof_type ==
12670                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12671                         tunnel = dev_flow->tunnel;
12672                 } else {
12673                         tunnel = mlx5_get_tof(items, actions,
12674                                               &dev_flow->tof_type);
12675                         dev_flow->tunnel = tunnel;
12676                 }
12677                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12678                                         (dev, attr, tunnel, dev_flow->tof_type);
12679         }
12682         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12683                                        &grp_info, error);
12684         if (ret)
12685                 return ret;
12686         dev_flow->dv.group = table;
12687         if (attr->transfer)
12688                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12689         /* number of actions must be set to 0 in case of dirty stack. */
12690         mhdr_res->actions_num = 0;
12691         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12692                 /*
12693                  * Do not add a decap action if the match rule drops the
12694                  * packet: HW rejects rules with both decap and drop.
12695                  *
12696                  * If a tunnel match rule was inserted before the matching
12697                  * tunnel set rule, the flow table used in the match rule must
12698                  * be registered. The current implementation handles that in
12699                  * flow_dv_match_register() at the function end.
12700                  */
12701                 bool add_decap = true;
12702                 const struct rte_flow_action *ptr = actions;
12703
12704                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12705                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12706                                 add_decap = false;
12707                                 break;
12708                         }
12709                 }
12710                 if (add_decap) {
12711                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12712                                                            attr->transfer,
12713                                                            error))
12714                                 return -rte_errno;
12715                         dev_flow->dv.actions[actions_n++] =
12716                                         dev_flow->dv.encap_decap->action;
12717                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12718                 }
12719         }
12720         for (; !actions_end ; actions++) {
12721                 const struct rte_flow_action_queue *queue;
12722                 const struct rte_flow_action_rss *rss;
12723                 const struct rte_flow_action *action = actions;
12724                 const uint8_t *rss_key;
12725                 struct mlx5_flow_tbl_resource *tbl;
12726                 struct mlx5_aso_age_action *age_act;
12727                 struct mlx5_flow_counter *cnt_act;
12728                 uint32_t port_id = 0;
12729                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12730                 int action_type = actions->type;
12731                 const struct rte_flow_action *found_action = NULL;
12732                 uint32_t jump_group = 0;
12733                 uint32_t owner_idx;
12734                 struct mlx5_aso_ct_action *ct;
12735
12736                 if (!mlx5_flow_os_action_supported(action_type))
12737                         return rte_flow_error_set(error, ENOTSUP,
12738                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12739                                                   actions,
12740                                                   "action not supported");
12741                 switch (action_type) {
12742                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12743                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12744                         break;
12745                 case RTE_FLOW_ACTION_TYPE_VOID:
12746                         break;
12747                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12748                 case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12749                         if (flow_dv_translate_action_port_id(dev, action,
12750                                                              &port_id, error))
12751                                 return -rte_errno;
12752                         port_id_resource.port_id = port_id;
12753                         MLX5_ASSERT(!handle->rix_port_id_action);
12754                         if (flow_dv_port_id_action_resource_register
12755                             (dev, &port_id_resource, dev_flow, error))
12756                                 return -rte_errno;
12757                         dev_flow->dv.actions[actions_n++] =
12758                                         dev_flow->dv.port_id_action->action;
12759                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12760                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12761                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12762                         num_of_dest++;
12763                         break;
12764                 case RTE_FLOW_ACTION_TYPE_FLAG:
12765                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12766                         dev_flow->handle->mark = 1;
12767                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12768                                 struct rte_flow_action_mark mark = {
12769                                         .id = MLX5_FLOW_MARK_DEFAULT,
12770                                 };
12771
12772                                 if (flow_dv_convert_action_mark(dev, &mark,
12773                                                                 mhdr_res,
12774                                                                 error))
12775                                         return -rte_errno;
12776                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12777                                 break;
12778                         }
12779                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12780                         /*
12781                          * Only one FLAG or MARK is supported per device flow
12782                          * right now. So the pointer to the tag resource must be
12783                          * zero before the register process.
12784                          */
12785                         MLX5_ASSERT(!handle->dvh.rix_tag);
12786                         if (flow_dv_tag_resource_register(dev, tag_be,
12787                                                           dev_flow, error))
12788                                 return -rte_errno;
12789                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12790                         dev_flow->dv.actions[actions_n++] =
12791                                         dev_flow->dv.tag_resource->action;
12792                         break;
12793                 case RTE_FLOW_ACTION_TYPE_MARK:
12794                         action_flags |= MLX5_FLOW_ACTION_MARK;
12795                         dev_flow->handle->mark = 1;
12796                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12797                                 const struct rte_flow_action_mark *mark =
12798                                         (const struct rte_flow_action_mark *)
12799                                                 actions->conf;
12800
12801                                 if (flow_dv_convert_action_mark(dev, mark,
12802                                                                 mhdr_res,
12803                                                                 error))
12804                                         return -rte_errno;
12805                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12806                                 break;
12807                         }
12808                         /* Fall-through */
12809                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12810                         /* Legacy (non-extensive) MARK action. */
12811                         tag_be = mlx5_flow_mark_set
12812                               (((const struct rte_flow_action_mark *)
12813                                (actions->conf))->id);
12814                         MLX5_ASSERT(!handle->dvh.rix_tag);
12815                         if (flow_dv_tag_resource_register(dev, tag_be,
12816                                                           dev_flow, error))
12817                                 return -rte_errno;
12818                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12819                         dev_flow->dv.actions[actions_n++] =
12820                                         dev_flow->dv.tag_resource->action;
12821                         break;
12822                 case RTE_FLOW_ACTION_TYPE_SET_META:
12823                         if (flow_dv_convert_action_set_meta
12824                                 (dev, mhdr_res, attr,
12825                                  (const struct rte_flow_action_set_meta *)
12826                                   actions->conf, error))
12827                                 return -rte_errno;
12828                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12829                         break;
12830                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12831                         if (flow_dv_convert_action_set_tag
12832                                 (dev, mhdr_res,
12833                                  (const struct rte_flow_action_set_tag *)
12834                                   actions->conf, error))
12835                                 return -rte_errno;
12836                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12837                         break;
12838                 case RTE_FLOW_ACTION_TYPE_DROP:
12839                         action_flags |= MLX5_FLOW_ACTION_DROP;
12840                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12841                         break;
12842                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12843                         queue = actions->conf;
12844                         rss_desc->queue_num = 1;
12845                         rss_desc->queue[0] = queue->index;
12846                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12847                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12848                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12849                         num_of_dest++;
12850                         break;
12851                 case RTE_FLOW_ACTION_TYPE_RSS:
12852                         rss = actions->conf;
12853                         memcpy(rss_desc->queue, rss->queue,
12854                                rss->queue_num * sizeof(uint16_t));
12855                         rss_desc->queue_num = rss->queue_num;
12856                         /* NULL RSS key indicates default RSS key. */
12857                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12858                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12859                         /*
12860                          * rss->level and rss->types should be set in advance
12861                          * when expanding items for RSS.
12862                          */
12863                         action_flags |= MLX5_FLOW_ACTION_RSS;
12864                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12865                                 MLX5_FLOW_FATE_SHARED_RSS :
12866                                 MLX5_FLOW_FATE_QUEUE;
12867                         break;
12868                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
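                              /*
                               * Internal action: the indirect AGE handle stores
                               * the ASO age index directly in the conf pointer.
                               */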
12869                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12870                         age_act = flow_aso_age_get_by_idx(dev, owner_idx);
12871                         if (flow->age == 0) {
12872                                 flow->age = owner_idx;
12873                                 __atomic_fetch_add(&age_act->refcnt, 1,
12874                                                    __ATOMIC_RELAXED);
12875                         }
12876                         age_act_pos = actions_n++;
12877                         action_flags |= MLX5_FLOW_ACTION_AGE;
12878                         break;
12879                 case RTE_FLOW_ACTION_TYPE_AGE:
12880                         non_shared_age = action->conf;
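                              /*
                               * Reserve the action slot; it is filled in later in
                               * this function once the aging mode (ASO or counter
                               * based) has been resolved.
                               */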
12881                         age_act_pos = actions_n++;
12882                         action_flags |= MLX5_FLOW_ACTION_AGE;
12883                         break;
12884                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12885                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12886                         cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
12887                                                              NULL);
12888                         MLX5_ASSERT(cnt_act != NULL);
12889                         /*
12890                          * When creating a meter drop flow in the drop table,
12891                          * the counter should not overwrite the rte flow counter.
12892                          */
12893                         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
12894                             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
12895                                 dev_flow->dv.actions[actions_n++] =
12896                                                         cnt_act->action;
12897                         } else {
12898                                 if (flow->counter == 0) {
12899                                         flow->counter = owner_idx;
12900                                         __atomic_fetch_add
12901                                                 (&cnt_act->shared_info.refcnt,
12902                                                  1, __ATOMIC_RELAXED);
12903                                 }
12904                                 /* Save information first, will apply later. */
12905                                 action_flags |= MLX5_FLOW_ACTION_COUNT;
12906                         }
12907                         break;
12908                 case RTE_FLOW_ACTION_TYPE_COUNT:
12909                         if (!priv->sh->devx) {
12910                                 return rte_flow_error_set
12911                                               (error, ENOTSUP,
12912                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12913                                                NULL,
12914                                                "count action not supported");
12915                         }
12916                         /* Save the information first, it will be applied later. */
12917                         count = action->conf;
12918                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12919                         break;
12920                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12921                         dev_flow->dv.actions[actions_n++] =
12922                                                 priv->sh->pop_vlan_action;
12923                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12924                         break;
12925                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12926                         if (!(action_flags &
12927                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12928                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12929                         vlan.eth_proto = rte_be_to_cpu_16
12930                              ((((const struct rte_flow_action_of_push_vlan *)
12931                                                    actions->conf)->ethertype));
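                        /*
                         * Fold any following SET_VLAN_VID/PCP actions into
                         * the VLAN header that is about to be pushed.
                         */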
12932                         found_action = mlx5_flow_find_action
12933                                         (actions + 1,
12934                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12935                         if (found_action)
12936                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12937                         found_action = mlx5_flow_find_action
12938                                         (actions + 1,
12939                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12940                         if (found_action)
12941                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12942                         if (flow_dv_create_action_push_vlan
12943                                             (dev, attr, &vlan, dev_flow, error))
12944                                 return -rte_errno;
12945                         dev_flow->dv.actions[actions_n++] =
12946                                         dev_flow->dv.push_vlan_res->action;
12947                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12948                         break;
12949                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12950                         /* The OF_PUSH_VLAN action has already handled this action. */
12951                         MLX5_ASSERT(action_flags &
12952                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12953                         break;
12954                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12955                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12956                                 break;
12957                         flow_dev_get_vlan_info_from_items(items, &vlan);
12958                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12959                         /* If there is no VLAN push, this is a modify header action. */
12960                         if (flow_dv_convert_action_modify_vlan_vid
12961                                                 (mhdr_res, actions, error))
12962                                 return -rte_errno;
12963                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12964                         break;
12965                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12966                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12967                         if (flow_dv_create_action_l2_encap(dev, actions,
12968                                                            dev_flow,
12969                                                            attr->transfer,
12970                                                            error))
12971                                 return -rte_errno;
12972                         dev_flow->dv.actions[actions_n++] =
12973                                         dev_flow->dv.encap_decap->action;
12974                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12975                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12976                                 sample_act->action_flags |=
12977                                                         MLX5_FLOW_ACTION_ENCAP;
12978                         break;
12979                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12980                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12981                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12982                                                            attr->transfer,
12983                                                            error))
12984                                 return -rte_errno;
12985                         dev_flow->dv.actions[actions_n++] =
12986                                         dev_flow->dv.encap_decap->action;
12987                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12988                         break;
12989                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12990                         /* Handle encap with preceding decap. */
12991                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12992                                 if (flow_dv_create_action_raw_encap
12993                                         (dev, actions, dev_flow, attr, error))
12994                                         return -rte_errno;
12995                                 dev_flow->dv.actions[actions_n++] =
12996                                         dev_flow->dv.encap_decap->action;
12997                         } else {
12998                                 /* Handle encap without preceding decap. */
12999                                 if (flow_dv_create_action_l2_encap
13000                                     (dev, actions, dev_flow, attr->transfer,
13001                                      error))
13002                                         return -rte_errno;
13003                                 dev_flow->dv.actions[actions_n++] =
13004                                         dev_flow->dv.encap_decap->action;
13005                         }
13006                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
13007                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
13008                                 sample_act->action_flags |=
13009                                                         MLX5_FLOW_ACTION_ENCAP;
13010                         break;
13011                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
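                        /* Skip VOID actions to peek at the next real one. */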
13012                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
13013                                 ;
13014                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
13015                                 if (flow_dv_create_action_l2_decap
13016                                     (dev, dev_flow, attr->transfer, error))
13017                                         return -rte_errno;
13018                                 dev_flow->dv.actions[actions_n++] =
13019                                         dev_flow->dv.encap_decap->action;
13020                         }
13021                         /* If decap is followed by encap, handle it at encap. */
13022                         action_flags |= MLX5_FLOW_ACTION_DECAP;
13023                         break;
13024                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
13025                         dev_flow->dv.actions[actions_n++] =
13026                                 (void *)(uintptr_t)action->conf;
13027                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13028                         break;
13029                 case RTE_FLOW_ACTION_TYPE_JUMP:
13030                         jump_group = ((const struct rte_flow_action_jump *)
13031                                                         action->conf)->group;
13032                         grp_info.std_tbl_fix = 0;
13033                         if (dev_flow->skip_scale &
13034                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
13035                                 grp_info.skip_scale = 1;
13036                         else
13037                                 grp_info.skip_scale = 0;
13038                         ret = mlx5_flow_group_to_table(dev, tunnel,
13039                                                        jump_group,
13040                                                        &table,
13041                                                        &grp_info, error);
13042                         if (ret)
13043                                 return ret;
13044                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
13045                                                        attr->transfer,
13046                                                        !!dev_flow->external,
13047                                                        tunnel, jump_group, 0,
13048                                                        0, error);
13049                         if (!tbl)
13050                                 return rte_flow_error_set
13051                                                 (error, errno,
13052                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13053                                                  NULL,
13054                                                  "cannot create jump action.");
13055                         if (flow_dv_jump_tbl_resource_register
13056                             (dev, tbl, dev_flow, error)) {
13057                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
13058                                 return rte_flow_error_set
13059                                                 (error, errno,
13060                                                  RTE_FLOW_ERROR_TYPE_ACTION,
13061                                                  NULL,
13062                                                  "cannot create jump action.");
13063                         }
13064                         dev_flow->dv.actions[actions_n++] =
13065                                         dev_flow->dv.jump->action;
13066                         action_flags |= MLX5_FLOW_ACTION_JUMP;
13067                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
13068                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
13069                         num_of_dest++;
13070                         break;
13071                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
13072                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
13073                         if (flow_dv_convert_action_modify_mac
13074                                         (mhdr_res, actions, error))
13075                                 return -rte_errno;
13076                         action_flags |= actions->type ==
13077                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
13078                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
13079                                         MLX5_FLOW_ACTION_SET_MAC_DST;
13080                         break;
13081                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
13082                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
13083                         if (flow_dv_convert_action_modify_ipv4
13084                                         (mhdr_res, actions, error))
13085                                 return -rte_errno;
13086                         action_flags |= actions->type ==
13087                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
13088                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
13089                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
13090                         break;
13091                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
13092                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
13093                         if (flow_dv_convert_action_modify_ipv6
13094                                         (mhdr_res, actions, error))
13095                                 return -rte_errno;
13096                         action_flags |= actions->type ==
13097                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
13098                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
13099                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
13100                         break;
13101                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
13102                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
13103                         if (flow_dv_convert_action_modify_tp
13104                                         (mhdr_res, actions, items,
13105                                          &flow_attr, dev_flow, !!(action_flags &
13106                                          MLX5_FLOW_ACTION_DECAP), error))
13107                                 return -rte_errno;
13108                         action_flags |= actions->type ==
13109                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
13110                                         MLX5_FLOW_ACTION_SET_TP_SRC :
13111                                         MLX5_FLOW_ACTION_SET_TP_DST;
13112                         break;
13113                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
13114                         if (flow_dv_convert_action_modify_dec_ttl
13115                                         (mhdr_res, items, &flow_attr, dev_flow,
13116                                          !!(action_flags &
13117                                          MLX5_FLOW_ACTION_DECAP), error))
13118                                 return -rte_errno;
13119                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
13120                         break;
13121                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
13122                         if (flow_dv_convert_action_modify_ttl
13123                                         (mhdr_res, actions, items, &flow_attr,
13124                                          dev_flow, !!(action_flags &
13125                                          MLX5_FLOW_ACTION_DECAP), error))
13126                                 return -rte_errno;
13127                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
13128                         break;
13129                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
13130                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
13131                         if (flow_dv_convert_action_modify_tcp_seq
13132                                         (mhdr_res, actions, error))
13133                                 return -rte_errno;
13134                         action_flags |= actions->type ==
13135                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
13136                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
13137                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
13138                         break;
13140                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
13141                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
13142                         if (flow_dv_convert_action_modify_tcp_ack
13143                                         (mhdr_res, actions, error))
13144                                 return -rte_errno;
13145                         action_flags |= actions->type ==
13146                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
13147                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
13148                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
13149                         break;
13150                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
13151                         if (flow_dv_convert_action_set_reg
13152                                         (mhdr_res, actions, error))
13153                                 return -rte_errno;
13154                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13155                         break;
13156                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
13157                         if (flow_dv_convert_action_copy_mreg
13158                                         (dev, mhdr_res, actions, error))
13159                                 return -rte_errno;
13160                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
13161                         break;
13162                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
13163                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
13164                         dev_flow->handle->fate_action =
13165                                         MLX5_FLOW_FATE_DEFAULT_MISS;
13166                         break;
13167                 case RTE_FLOW_ACTION_TYPE_METER:
13168                         if (!wks->fm)
13169                                 return rte_flow_error_set(error, rte_errno,
13170                                         RTE_FLOW_ERROR_TYPE_ACTION,
13171                                         NULL, "Failed to get meter in flow.");
13172                         /* Set the meter action. */
13173                         dev_flow->dv.actions[actions_n++] =
13174                                 wks->fm->meter_action;
13175                         action_flags |= MLX5_FLOW_ACTION_METER;
13176                         break;
13177                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
13178                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
13179                                                               actions, error))
13180                                 return -rte_errno;
13181                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
13182                         break;
13183                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
13184                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13185                                                               actions, error))
13186                                 return -rte_errno;
13187                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13188                         break;
13189                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13190                         sample_act_pos = actions_n;
13191                         sample = (const struct rte_flow_action_sample *)
13192                                  action->conf;
13193                         actions_n++;
13194                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13195                         /* Put the encap action into the group if used with port ID. */
13196                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13197                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13198                                 sample_act->action_flags |=
13199                                                         MLX5_FLOW_ACTION_ENCAP;
13200                         break;
13201                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13202                         if (flow_dv_convert_action_modify_field
13203                                         (dev, mhdr_res, actions, attr, error))
13204                                 return -rte_errno;
13205                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13206                         break;
13207                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
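                        /*
                         * Pick the original- or reply-direction DR action of
                         * the ASO CT object referenced by action->conf.
                         */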
13208                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13209                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13210                         if (!ct)
13211                                 return rte_flow_error_set(error, EINVAL,
13212                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13213                                                 NULL,
13214                                                 "Failed to get CT object.");
13215                         if (mlx5_aso_ct_available(priv->sh, ct))
13216                                 return rte_flow_error_set(error, rte_errno,
13217                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13218                                                 NULL,
13219                                                 "CT is unavailable.");
13220                         if (ct->is_original)
13221                                 dev_flow->dv.actions[actions_n] =
13222                                                         ct->dr_action_orig;
13223                         else
13224                                 dev_flow->dv.actions[actions_n] =
13225                                                         ct->dr_action_rply;
13226                         if (flow->ct == 0) {
13227                                 flow->indirect_type =
13228                                                 MLX5_INDIRECT_ACTION_TYPE_CT;
13229                                 flow->ct = owner_idx;
13230                                 __atomic_fetch_add(&ct->refcnt, 1,
13231                                                    __ATOMIC_RELAXED);
13232                         }
13233                         actions_n++;
13234                         action_flags |= MLX5_FLOW_ACTION_CT;
13235                         break;
13236                 case RTE_FLOW_ACTION_TYPE_END:
13237                         actions_end = true;
13238                         if (mhdr_res->actions_num) {
13239                                 /* Create the modify header action if needed. */
13240                                 if (flow_dv_modify_hdr_resource_register
13241                                         (dev, mhdr_res, dev_flow, error))
13242                                         return -rte_errno;
13243                                 dev_flow->dv.actions[modify_action_position] =
13244                                         handle->dvh.modify_hdr->action;
13245                         }
13246                         /*
13247                          * Handle the AGE and COUNT actions with a single
13248                          * HW counter when they are not shared.
13249                          */
13250                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
13251                                 if ((non_shared_age && count) ||
13252                                     !(priv->sh->flow_hit_aso_en &&
13253                                       (attr->group || attr->transfer))) {
13254                                         /* Implement aging via counters. */
13255                                         cnt_act = flow_dv_prepare_counter
13256                                                                 (dev, dev_flow,
13257                                                                  flow, count,
13258                                                                  non_shared_age,
13259                                                                  error);
13260                                         if (!cnt_act)
13261                                                 return -rte_errno;
13262                                         dev_flow->dv.actions[age_act_pos] =
13263                                                                 cnt_act->action;
13264                                         break;
13265                                 }
13266                                 if (!flow->age && non_shared_age) {
13267                                         flow->age = flow_dv_aso_age_alloc
13268                                                                 (dev, error);
13269                                         if (!flow->age)
13270                                                 return -rte_errno;
13271                                         flow_dv_aso_age_params_init
13272                                                     (dev, flow->age,
13273                                                      non_shared_age->context ?
13274                                                      non_shared_age->context :
13275                                                      (void *)(uintptr_t)
13276                                                      (dev_flow->flow_idx),
13277                                                      non_shared_age->timeout);
13278                                 }
13279                                 age_act = flow_aso_age_get_by_idx(dev,
13280                                                                   flow->age);
13281                                 dev_flow->dv.actions[age_act_pos] =
13282                                                              age_act->dr_action;
13283                         }
13284                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13285                                 /*
13286                                  * Create one count action, to be used
13287                                  * by all sub-flows.
13288                                  */
13289                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13290                                                                   flow, count,
13291                                                                   NULL, error);
13292                                 if (!cnt_act)
13293                                         return -rte_errno;
13294                                 dev_flow->dv.actions[actions_n++] =
13295                                                                 cnt_act->action;
13296                         }
13297                 default:
13298                         break;
13299                 }
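                /*
                 * Reserve one slot in dv.actions for the modify header action
                 * at the first action that fills mhdr_res; the actual action
                 * is stored there once END is reached.
                 */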
13300                 if (mhdr_res->actions_num &&
13301                     modify_action_position == UINT32_MAX)
13302                         modify_action_position = actions_n++;
13303         }
13304         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13305                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13306                 int item_type = items->type;
13307
13308                 if (!mlx5_flow_os_item_supported(item_type))
13309                         return rte_flow_error_set(error, ENOTSUP,
13310                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13311                                                   NULL, "item not supported");
13312                 switch (item_type) {
13313                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13314                         flow_dv_translate_item_port_id
13315                                 (dev, match_mask, match_value, items, attr);
13316                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13317                         break;
13318                 case RTE_FLOW_ITEM_TYPE_ETH:
13319                         flow_dv_translate_item_eth(match_mask, match_value,
13320                                                    items, tunnel,
13321                                                    dev_flow->dv.group);
13322                         matcher.priority = action_flags &
13323                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13324                                         !dev_flow->external ?
13325                                         MLX5_PRIORITY_MAP_L3 :
13326                                         MLX5_PRIORITY_MAP_L2;
13327                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13328                                              MLX5_FLOW_LAYER_OUTER_L2;
13329                         break;
13330                 case RTE_FLOW_ITEM_TYPE_VLAN:
13331                         flow_dv_translate_item_vlan(dev_flow,
13332                                                     match_mask, match_value,
13333                                                     items, tunnel,
13334                                                     dev_flow->dv.group);
13335                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13336                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13337                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13338                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13339                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13340                         break;
13341                 case RTE_FLOW_ITEM_TYPE_IPV4:
13342                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13343                                                   &item_flags, &tunnel);
13344                         flow_dv_translate_item_ipv4(match_mask, match_value,
13345                                                     items, tunnel,
13346                                                     dev_flow->dv.group);
13347                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13348                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13349                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
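                        /*
                         * Track the next protocol from spec & mask so the
                         * following items can be classified as tunnel/inner
                         * layers.
                         */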
13350                         if (items->mask != NULL &&
13351                             ((const struct rte_flow_item_ipv4 *)
13352                              items->mask)->hdr.next_proto_id) {
13353                                 next_protocol =
13354                                         ((const struct rte_flow_item_ipv4 *)
13355                                          (items->spec))->hdr.next_proto_id;
13356                                 next_protocol &=
13357                                         ((const struct rte_flow_item_ipv4 *)
13358                                          (items->mask))->hdr.next_proto_id;
13359                         } else {
13360                                 /* Reset for inner layer. */
13361                                 next_protocol = 0xff;
13362                         }
13363                         break;
13364                 case RTE_FLOW_ITEM_TYPE_IPV6:
13365                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13366                                                   &item_flags, &tunnel);
13367                         flow_dv_translate_item_ipv6(match_mask, match_value,
13368                                                     items, tunnel,
13369                                                     dev_flow->dv.group);
13370                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13371                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13372                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13373                         if (items->mask != NULL &&
13374                             ((const struct rte_flow_item_ipv6 *)
13375                              items->mask)->hdr.proto) {
13376                                 next_protocol =
13377                                         ((const struct rte_flow_item_ipv6 *)
13378                                          items->spec)->hdr.proto;
13379                                 next_protocol &=
13380                                         ((const struct rte_flow_item_ipv6 *)
13381                                          items->mask)->hdr.proto;
13382                         } else {
13383                                 /* Reset for inner layer. */
13384                                 next_protocol = 0xff;
13385                         }
13386                         break;
13387                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13388                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13389                                                              match_value,
13390                                                              items, tunnel);
13391                         last_item = tunnel ?
13392                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13393                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13394                         if (items->mask != NULL &&
13395                             ((const struct rte_flow_item_ipv6_frag_ext *)
13396                              items->mask)->hdr.next_header) {
13397                                 next_protocol =
13398                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13399                                  items->spec)->hdr.next_header;
13400                                 next_protocol &=
13401                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13402                                  items->mask)->hdr.next_header;
13403                         } else {
13404                                 /* Reset for inner layer. */
13405                                 next_protocol = 0xff;
13406                         }
13407                         break;
13408                 case RTE_FLOW_ITEM_TYPE_TCP:
13409                         flow_dv_translate_item_tcp(match_mask, match_value,
13410                                                    items, tunnel);
13411                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13412                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13413                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13414                         break;
13415                 case RTE_FLOW_ITEM_TYPE_UDP:
13416                         flow_dv_translate_item_udp(match_mask, match_value,
13417                                                    items, tunnel);
13418                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13419                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13420                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13421                         break;
13422                 case RTE_FLOW_ITEM_TYPE_GRE:
13423                         flow_dv_translate_item_gre(match_mask, match_value,
13424                                                    items, tunnel);
13425                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13426                         last_item = MLX5_FLOW_LAYER_GRE;
13427                         break;
13428                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13429                         flow_dv_translate_item_gre_key(match_mask,
13430                                                        match_value, items);
13431                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13432                         break;
13433                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13434                         flow_dv_translate_item_nvgre(match_mask, match_value,
13435                                                      items, tunnel);
13436                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13437                         last_item = MLX5_FLOW_LAYER_GRE;
13438                         break;
13439                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13440                         flow_dv_translate_item_vxlan(dev, attr,
13441                                                      match_mask, match_value,
13442                                                      items, tunnel);
13443                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13444                         last_item = MLX5_FLOW_LAYER_VXLAN;
13445                         break;
13446                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13447                         flow_dv_translate_item_vxlan_gpe(match_mask,
13448                                                          match_value, items,
13449                                                          tunnel);
13450                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13451                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13452                         break;
13453                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13454                         flow_dv_translate_item_geneve(match_mask, match_value,
13455                                                       items, tunnel);
13456                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13457                         last_item = MLX5_FLOW_LAYER_GENEVE;
13458                         break;
13459                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13460                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13461                                                           match_value,
13462                                                           items, error);
13463                         if (ret)
13464                                 return rte_flow_error_set(error, -ret,
13465                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13466                                         "cannot create GENEVE TLV option");
13467                         flow->geneve_tlv_option = 1;
13468                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13469                         break;
13470                 case RTE_FLOW_ITEM_TYPE_MPLS:
13471                         flow_dv_translate_item_mpls(match_mask, match_value,
13472                                                     items, last_item, tunnel);
13473                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13474                         last_item = MLX5_FLOW_LAYER_MPLS;
13475                         break;
13476                 case RTE_FLOW_ITEM_TYPE_MARK:
13477                         flow_dv_translate_item_mark(dev, match_mask,
13478                                                     match_value, items);
13479                         last_item = MLX5_FLOW_ITEM_MARK;
13480                         break;
13481                 case RTE_FLOW_ITEM_TYPE_META:
13482                         flow_dv_translate_item_meta(dev, match_mask,
13483                                                     match_value, attr, items);
13484                         last_item = MLX5_FLOW_ITEM_METADATA;
13485                         break;
13486                 case RTE_FLOW_ITEM_TYPE_ICMP:
13487                         flow_dv_translate_item_icmp(match_mask, match_value,
13488                                                     items, tunnel);
13489                         last_item = MLX5_FLOW_LAYER_ICMP;
13490                         break;
13491                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13492                         flow_dv_translate_item_icmp6(match_mask, match_value,
13493                                                       items, tunnel);
13494                         last_item = MLX5_FLOW_LAYER_ICMP6;
13495                         break;
13496                 case RTE_FLOW_ITEM_TYPE_TAG:
13497                         flow_dv_translate_item_tag(dev, match_mask,
13498                                                    match_value, items);
13499                         last_item = MLX5_FLOW_ITEM_TAG;
13500                         break;
13501                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13502                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13503                                                         match_value, items);
13504                         last_item = MLX5_FLOW_ITEM_TAG;
13505                         break;
13506                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13507                         flow_dv_translate_item_tx_queue(dev, match_mask,
13508                                                         match_value,
13509                                                         items);
13510                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13511                         break;
13512                 case RTE_FLOW_ITEM_TYPE_GTP:
13513                         flow_dv_translate_item_gtp(match_mask, match_value,
13514                                                    items, tunnel);
13515                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13516                         last_item = MLX5_FLOW_LAYER_GTP;
13517                         break;
13518                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13519                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13520                                                           match_value,
13521                                                           items);
13522                         if (ret)
13523                                 return rte_flow_error_set(error, -ret,
13524                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13525                                         "cannot create GTP PSC item");
13526                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13527                         break;
13528                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13529                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13530                                 /* Create it only the first time it is used. */
13531                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13532                                 if (ret)
13533                                         return rte_flow_error_set
13534                                                 (error, -ret,
13535                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13536                                                 NULL,
13537                                                 "cannot create eCPRI parser");
13538                         }
13539                         flow_dv_translate_item_ecpri(dev, match_mask,
13540                                                      match_value, items,
13541                                                      last_item);
13542                         /* No other protocol should follow the eCPRI layer. */
13543                         last_item = MLX5_FLOW_LAYER_ECPRI;
13544                         break;
13545                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13546                         flow_dv_translate_item_integrity(items, integrity_items,
13547                                                          &last_item);
13548                         break;
13549                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13550                         flow_dv_translate_item_aso_ct(dev, match_mask,
13551                                                       match_value, items);
13552                         break;
13553                 case RTE_FLOW_ITEM_TYPE_FLEX:
13554                         flow_dv_translate_item_flex(dev, match_mask,
13555                                                     match_value, items,
13556                                                     dev_flow, tunnel != 0);
13557                         last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
13558                                     MLX5_FLOW_ITEM_OUTER_FLEX;
13559                         break;
13560                 default:
13561                         break;
13562                 }
13563                 item_flags |= last_item;
13564         }
13565         /*
13566          * When E-Switch mode is enabled, there are two cases where the
13567          * source port must be set manually: the NIC steering rule, and
13568          * the E-Switch rule where no port_id item was found. In both
13569          * cases the source port is set according to the current port
13570          * in use.
13571          */
13572         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13573             (priv->representor || priv->master)) {
13574                 if (flow_dv_translate_item_port_id(dev, match_mask,
13575                                                    match_value, NULL, attr))
13576                         return -rte_errno;
13577         }
13578         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
13579                 flow_dv_translate_item_integrity_post(match_mask, match_value,
13580                                                       integrity_items,
13581                                                       item_flags);
13582         }
13583 #ifdef RTE_LIBRTE_MLX5_DEBUG
13584         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13585                                               dev_flow->dv.value.buf));
13586 #endif
13587         /*
13588          * Layers may already be initialized from the prefix flow if this
13589          * dev_flow is the suffix flow.
13590          */
13591         handle->layers |= item_flags;
13592         if (action_flags & MLX5_FLOW_ACTION_RSS)
13593                 flow_dv_hashfields_set(dev_flow, rss_desc);
13594         /* If the sample action contains an RSS action, the Sample/Mirror
13595          * resource should be registered after the hash fields are updated.
13596          */
13597         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13598                 ret = flow_dv_translate_action_sample(dev,
13599                                                       sample,
13600                                                       dev_flow, attr,
13601                                                       &num_of_dest,
13602                                                       sample_actions,
13603                                                       &sample_res,
13604                                                       error);
13605                 if (ret < 0)
13606                         return ret;
13607                 ret = flow_dv_create_action_sample(dev,
13608                                                    dev_flow,
13609                                                    num_of_dest,
13610                                                    &sample_res,
13611                                                    &mdest_res,
13612                                                    sample_actions,
13613                                                    action_flags,
13614                                                    error);
13615                 if (ret < 0)
13616                         return rte_flow_error_set
13617                                                 (error, rte_errno,
13618                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13619                                                 NULL,
13620                                                 "cannot create sample action");
13621                 if (num_of_dest > 1) {
13622                         dev_flow->dv.actions[sample_act_pos] =
13623                         dev_flow->dv.dest_array_res->action;
13624                 } else {
13625                         dev_flow->dv.actions[sample_act_pos] =
13626                         dev_flow->dv.sample_res->verbs_action;
13627                 }
13628         }
13629         /*
13630          * For multiple destinations (sample action with ratio=1), the
13631          * encap action and the port ID action are combined into one
13632          * destination array action. Remove those original actions from
13633          * the flow and use only the sample action instead.
13634          */
13635         if (num_of_dest > 1 &&
13636             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13637                 int i;
13638                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13639
13640                 for (i = 0; i < actions_n; i++) {
13641                         if ((sample_act->dr_encap_action &&
13642                                 sample_act->dr_encap_action ==
13643                                 dev_flow->dv.actions[i]) ||
13644                                 (sample_act->dr_port_id_action &&
13645                                 sample_act->dr_port_id_action ==
13646                                 dev_flow->dv.actions[i]) ||
13647                                 (sample_act->dr_jump_action &&
13648                                 sample_act->dr_jump_action ==
13649                                 dev_flow->dv.actions[i]))
13650                                 continue;
13651                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13652                 }
13653                 memcpy((void *)dev_flow->dv.actions,
13654                                 (void *)temp_actions,
13655                                 tmp_actions_n * sizeof(void *));
13656                 actions_n = tmp_actions_n;
13657         }
13658         dev_flow->dv.actions_n = actions_n;
13659         dev_flow->act_flags = action_flags;
13660         if (wks->skip_matcher_reg)
13661                 return 0;
13662         /* Register matcher. */
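        /*
         * The CRC of the match mask, together with the mask size, is used
         * below as the lookup key for the matcher cache.
         */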
13663         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13664                                     matcher.mask.size);
13665         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13666                                                      matcher.priority,
13667                                                      dev_flow->external);
13668         /*
13669          * When creating a meter drop flow in the drop table with the
13670          * original 5-tuple match, the matcher priority should be lower
13671          * than that of the mtr_id matcher.
13672          */
13673         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13674             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13675             matcher.priority <= MLX5_REG_BITS)
13676                 matcher.priority += MLX5_REG_BITS;
13677         /* The reserved field does not need to be set to 0 here. */
13678         tbl_key.is_fdb = attr->transfer;
13679         tbl_key.is_egress = attr->egress;
13680         tbl_key.level = dev_flow->dv.group;
13681         tbl_key.id = dev_flow->dv.table_id;
13682         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13683                                      tunnel, attr->group, error))
13684                 return -rte_errno;
13685         return 0;
13686 }
13687
13688 /**
13689  * Set the hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13691  *
13692  * @param[in, out] action
13693  *   Shared RSS action holding hash RX queue objects.
13694  * @param[in] hash_fields
13695  *   Defines combination of packet fields to participate in RX hash.
13698  * @param[in] hrxq_idx
13699  *   Hash RX queue index to set.
13700  *
13701  * @return
13702  *   0 on success, otherwise negative errno value.
13703  */
13704 static int
13705 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13706                               const uint64_t hash_fields,
13707                               uint32_t hrxq_idx)
13708 {
13709         uint32_t *hrxqs = action->hrxq;
13710
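        /*
         * Slot layout shared with __flow_dv_action_rss_hrxq_lookup() below
         * (IBV_RX_HASH_INNER is masked out, so tunnel variants land in the
         * same slot as their outer counterparts):
         *   hrxqs[0] - IPv4      hrxqs[3] - IPv6
         *   hrxqs[1] - IPv4/TCP  hrxqs[4] - IPv6/TCP
         *   hrxqs[2] - IPv4/UDP  hrxqs[5] - IPv6/UDP
         *   hrxqs[6] - non-IP (MLX5_RSS_HASH_NONE)
         */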
13711         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13712         case MLX5_RSS_HASH_IPV4:
13713                 /* fall-through. */
13714         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13715                 /* fall-through. */
13716         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13717                 hrxqs[0] = hrxq_idx;
13718                 return 0;
13719         case MLX5_RSS_HASH_IPV4_TCP:
13720                 /* fall-through. */
13721         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13722                 /* fall-through. */
13723         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13724                 hrxqs[1] = hrxq_idx;
13725                 return 0;
13726         case MLX5_RSS_HASH_IPV4_UDP:
13727                 /* fall-through. */
13728         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13729                 /* fall-through. */
13730         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13731                 hrxqs[2] = hrxq_idx;
13732                 return 0;
13733         case MLX5_RSS_HASH_IPV6:
13734                 /* fall-through. */
13735         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13736                 /* fall-through. */
13737         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13738                 hrxqs[3] = hrxq_idx;
13739                 return 0;
13740         case MLX5_RSS_HASH_IPV6_TCP:
13741                 /* fall-through. */
13742         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13743                 /* fall-through. */
13744         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13745                 hrxqs[4] = hrxq_idx;
13746                 return 0;
13747         case MLX5_RSS_HASH_IPV6_UDP:
13748                 /* fall-through. */
13749         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13750                 /* fall-through. */
13751         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13752                 hrxqs[5] = hrxq_idx;
13753                 return 0;
13754         case MLX5_RSS_HASH_NONE:
13755                 hrxqs[6] = hrxq_idx;
13756                 return 0;
13757         default:
13758                 return -1;
13759         }
13760 }
13761
13762 /**
13763  * Look up the hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13765  *
13766  * @param[in] dev
13767  *   Pointer to the Ethernet device structure.
13768  * @param[in] idx
13769  *   Shared RSS action ID holding hash RX queue objects.
13770  * @param[in] hash_fields
13771  *   Defines combination of packet fields to participate in RX hash.
13774  *
13775  * @return
13776  *   Valid hash RX queue index, otherwise 0.
13777  */
13778 static uint32_t
13779 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13780                                  const uint64_t hash_fields)
13781 {
13782         struct mlx5_priv *priv = dev->data->dev_private;
13783         struct mlx5_shared_action_rss *shared_rss =
13784             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13785         const uint32_t *hrxqs = shared_rss->hrxq;
13786
13787         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13788         case MLX5_RSS_HASH_IPV4:
13789                 /* fall-through. */
13790         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13791                 /* fall-through. */
13792         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13793                 return hrxqs[0];
13794         case MLX5_RSS_HASH_IPV4_TCP:
13795                 /* fall-through. */
13796         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13797                 /* fall-through. */
13798         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13799                 return hrxqs[1];
13800         case MLX5_RSS_HASH_IPV4_UDP:
13801                 /* fall-through. */
13802         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13803                 /* fall-through. */
13804         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13805                 return hrxqs[2];
13806         case MLX5_RSS_HASH_IPV6:
13807                 /* fall-through. */
13808         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13809                 /* fall-through. */
13810         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13811                 return hrxqs[3];
13812         case MLX5_RSS_HASH_IPV6_TCP:
13813                 /* fall-through. */
13814         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13815                 /* fall-through. */
13816         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13817                 return hrxqs[4];
13818         case MLX5_RSS_HASH_IPV6_UDP:
13819                 /* fall-through. */
13820         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13821                 /* fall-through. */
13822         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13823                 return hrxqs[5];
13824         case MLX5_RSS_HASH_NONE:
13825                 return hrxqs[6];
13826         default:
13827                 return 0;
13828         }
13830 }
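
/*
 * Usage sketch for the two helpers above (illustrative only, the variable
 * names are hypothetical):
 *
 *   __flow_dv_action_rss_hrxq_set(shared_rss, MLX5_RSS_HASH_IPV4_TCP,
 *                                 hrxq_idx);
 *   ...
 *   hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev, srss_idx,
 *                                               MLX5_RSS_HASH_IPV4_TCP);
 */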
13831
13832 /**
13833  * Apply the flow to the NIC, lock free
13834  * (the mutex should be acquired by the caller).
13835  *
13836  * @param[in] dev
13837  *   Pointer to the Ethernet device structure.
13838  * @param[in, out] flow
13839  *   Pointer to flow structure.
13840  * @param[out] error
13841  *   Pointer to error structure.
13842  *
13843  * @return
13844  *   0 on success, a negative errno value otherwise and rte_errno is set.
13845  */
13846 static int
13847 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13848               struct rte_flow_error *error)
13849 {
13850         struct mlx5_flow_dv_workspace *dv;
13851         struct mlx5_flow_handle *dh;
13852         struct mlx5_flow_handle_dv *dv_h;
13853         struct mlx5_flow *dev_flow;
13854         struct mlx5_priv *priv = dev->data->dev_private;
13855         uint32_t handle_idx;
13856         int n;
13857         int err;
13858         int idx;
13859         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13860         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13861         uint8_t misc_mask;
13862
13863         MLX5_ASSERT(wks);
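        /* Walk the workspace sub-flows, most recently created first. */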
13864         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13865                 dev_flow = &wks->flows[idx];
13866                 dv = &dev_flow->dv;
13867                 dh = dev_flow->handle;
13868                 dv_h = &dh->dvh;
13869                 n = dv->actions_n;
13870                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13871                         if (dv->transfer) {
13872                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13873                                 dv->actions[n++] = priv->sh->dr_drop_action;
13874                         } else {
13875 #ifdef HAVE_MLX5DV_DR
13876                                 /* DR supports drop action placeholder. */
13877                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13878                                 dv->actions[n++] = dv->group ?
13879                                         priv->sh->dr_drop_action :
13880                                         priv->root_drop_action;
13881 #else
13882                                 /* For DV we use the explicit drop queue. */
13883                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13884                                 dv->actions[n++] =
13885                                                 priv->drop_queue.hrxq->action;
13886 #endif
13887                         }
13888                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13889                            !dv_h->rix_sample && !dv_h->rix_dest_array) {
13890                         struct mlx5_hrxq *hrxq;
13891                         uint32_t hrxq_idx;
13892
13893                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13894                                                     &hrxq_idx);
13895                         if (!hrxq) {
13896                                 rte_flow_error_set
13897                                         (error, rte_errno,
13898                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13899                                          "cannot get hash queue");
13900                                 goto error;
13901                         }
13902                         dh->rix_hrxq = hrxq_idx;
13903                         dv->actions[n++] = hrxq->action;
13904                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13905                         struct mlx5_hrxq *hrxq = NULL;
13906                         uint32_t hrxq_idx;
13907
13908                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13909                                                 rss_desc->shared_rss,
13910                                                 dev_flow->hash_fields);
13911                         if (hrxq_idx)
13912                                 hrxq = mlx5_ipool_get
13913                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13914                                          hrxq_idx);
13915                         if (!hrxq) {
13916                                 rte_flow_error_set
13917                                         (error, rte_errno,
13918                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13919                                          "cannot get hash queue");
13920                                 goto error;
13921                         }
13922                         dh->rix_srss = rss_desc->shared_rss;
13923                         dv->actions[n++] = hrxq->action;
13924                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13925                         if (!priv->sh->default_miss_action) {
13926                                 rte_flow_error_set
13927                                         (error, rte_errno,
13928                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13929                                          "default miss action was not created.");
13930                                 goto error;
13931                         }
13932                         dv->actions[n++] = priv->sh->default_miss_action;
13933                 }
13934                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13935                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13936                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13937                                                (void *)&dv->value, n,
13938                                                dv->actions, &dh->drv_flow);
13939                 if (err) {
13940                         rte_flow_error_set
13941                                 (error, errno,
13942                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13943                                 NULL,
13944                                 (!priv->config.allow_duplicate_pattern &&
13945                                 errno == EEXIST) ?
13946                                 "duplicate pattern is not allowed" :
13947                                 "hardware refuses to create flow");
13948                         goto error;
13949                 }
13950                 if (priv->vmwa_context &&
13951                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13952                         /*
13953                          * The rule contains the VLAN pattern.
13954                          * For a VF we create a VLAN interface so
13955                          * that the hypervisor sets the correct
13956                          * e-Switch vport context.
13957                          */
13958                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13959                 }
13960         }
13961         return 0;
13962 error:
13963         err = rte_errno; /* Save rte_errno before cleanup. */
13964         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13965                        handle_idx, dh, next) {
13966                 /* hrxq is a union, don't clear it if the flag is not set. */
13967                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13968                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13969                         dh->rix_hrxq = 0;
13970                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13971                         dh->rix_srss = 0;
13972                 }
13973                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13974                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13975         }
13976         rte_errno = err; /* Restore rte_errno. */
13977         return -rte_errno;
13978 }
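/*
 * Summary sketch of the fate resolution performed in the loop above (not an
 * exhaustive list of the action types handled by this function):
 *
 *   MLX5_FLOW_FATE_DROP         -> dr_drop_action or drop queue hrxq action
 *   MLX5_FLOW_FATE_QUEUE        -> per-flow hrxq from flow_dv_hrxq_prepare()
 *   MLX5_FLOW_FATE_SHARED_RSS   -> pre-created hrxq found via
 *                                  __flow_dv_action_rss_hrxq_lookup()
 *   MLX5_FLOW_FATE_DEFAULT_MISS -> priv->sh->default_miss_action
 *
 * The resolved action is appended to dv->actions before the flow is pushed
 * to hardware with mlx5_flow_os_create_flow().
 */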
13979
13980 void
13981 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
13982                           struct mlx5_list_entry *entry)
13983 {
13984         struct mlx5_flow_dv_matcher *resource = container_of(entry,
13985                                                              typeof(*resource),
13986                                                              entry);
13987
13988         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
13989         mlx5_free(resource);
13990 }
13991
13992 /**
13993  * Release the flow matcher.
13994  *
13995  * @param dev
13996  *   Pointer to Ethernet device.
13997  * @param handle
13998  *   Pointer to the mlx5_flow_handle holding the matcher.
13999  *
14000  * @return
14001  *   1 while a reference on it exists, 0 when freed.
14002  */
14003 static int
14004 flow_dv_matcher_release(struct rte_eth_dev *dev,
14005                         struct mlx5_flow_handle *handle)
14006 {
14007         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
14008         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
14009                                                             typeof(*tbl), tbl);
14010         int ret;
14011
14012         MLX5_ASSERT(matcher->matcher_object);
14013         ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
14014         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
14015         return ret;
14016 }
14017
14018 void
14019 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14020 {
14021         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14022         struct mlx5_flow_dv_encap_decap_resource *res =
14023                                        container_of(entry, typeof(*res), entry);
14024
14025         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14026         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
14027 }
14028
14029 /**
14030  * Release an encap/decap resource.
14031  *
14032  * @param dev
14033  *   Pointer to Ethernet device.
14034  * @param encap_decap_idx
14035  *   Index of encap decap resource.
14036  *
14037  * @return
14038  *   1 while a reference on it exists, 0 when freed.
14039  */
14040 static int
14041 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
14042                                      uint32_t encap_decap_idx)
14043 {
14044         struct mlx5_priv *priv = dev->data->dev_private;
14045         struct mlx5_flow_dv_encap_decap_resource *resource;
14046
14047         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
14048                                   encap_decap_idx);
14049         if (!resource)
14050                 return 0;
14051         MLX5_ASSERT(resource->action);
14052         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
14053 }
14054
14055 /**
14056  * Release a jump-to-table action resource.
14057  *
14058  * @param dev
14059  *   Pointer to Ethernet device.
14060  * @param rix_jump
14061  *   Index to the jump action resource.
14062  *
14063  * @return
14064  *   1 while a reference on it exists, 0 when freed.
14065  */
14066 static int
14067 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
14068                                   uint32_t rix_jump)
14069 {
14070         struct mlx5_priv *priv = dev->data->dev_private;
14071         struct mlx5_flow_tbl_data_entry *tbl_data;
14072
14073         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
14074                                   rix_jump);
14075         if (!tbl_data)
14076                 return 0;
14077         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
14078 }
14079
14080 void
14081 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14082 {
14083         struct mlx5_flow_dv_modify_hdr_resource *res =
14084                 container_of(entry, typeof(*res), entry);
14085         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14086
14087         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
14088         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
14089 }
14090
14091 /**
14092  * Release a modify-header resource.
14093  *
14094  * @param dev
14095  *   Pointer to Ethernet device.
14096  * @param handle
14097  *   Pointer to mlx5_flow_handle.
14098  *
14099  * @return
14100  *   1 while a reference on it exists, 0 when freed.
14101  */
14102 static int
14103 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
14104                                     struct mlx5_flow_handle *handle)
14105 {
14106         struct mlx5_priv *priv = dev->data->dev_private;
14107         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
14108
14109         MLX5_ASSERT(entry->action);
14110         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
14111 }
14112
14113 void
14114 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14115 {
14116         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14117         struct mlx5_flow_dv_port_id_action_resource *resource =
14118                                   container_of(entry, typeof(*resource), entry);
14119
14120         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14121         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
14122 }
14123
14124 /**
14125  * Release port ID action resource.
14126  *
14127  * @param dev
14128  *   Pointer to Ethernet device.
14129  * @param port_id
14130  *   Index to port ID action resource.
14131  *
14132  * @return
14133  *   1 while a reference on it exists, 0 when freed.
14134  */
14135 static int
14136 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
14137                                         uint32_t port_id)
14138 {
14139         struct mlx5_priv *priv = dev->data->dev_private;
14140         struct mlx5_flow_dv_port_id_action_resource *resource;
14141
14142         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
14143         if (!resource)
14144                 return 0;
14145         MLX5_ASSERT(resource->action);
14146         return mlx5_list_unregister(priv->sh->port_id_action_list,
14147                                     &resource->entry);
14148 }
14149
14150 /**
14151  * Release shared RSS action resource.
14152  *
14153  * @param dev
14154  *   Pointer to Ethernet device.
14155  * @param srss
14156  *   Shared RSS action index.
14157  */
14158 static void
14159 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
14160 {
14161         struct mlx5_priv *priv = dev->data->dev_private;
14162         struct mlx5_shared_action_rss *shared_rss;
14163
14164         shared_rss = mlx5_ipool_get
14165                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
14166         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14167 }
14168
14169 void
14170 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
14171 {
14172         struct mlx5_dev_ctx_shared *sh = tool_ctx;
14173         struct mlx5_flow_dv_push_vlan_action_resource *resource =
14174                         container_of(entry, typeof(*resource), entry);
14175
14176         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14177         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
14178 }
14179
14180 /**
14181  * Release a push VLAN action resource.
14182  *
14183  * @param dev
14184  *   Pointer to Ethernet device.
14185  * @param handle
14186  *   Pointer to mlx5_flow_handle.
14187  *
14188  * @return
14189  *   1 while a reference on it exists, 0 when freed.
14190  */
14191 static int
14192 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
14193                                           struct mlx5_flow_handle *handle)
14194 {
14195         struct mlx5_priv *priv = dev->data->dev_private;
14196         struct mlx5_flow_dv_push_vlan_action_resource *resource;
14197         uint32_t idx = handle->dvh.rix_push_vlan;
14198
14199         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
14200         if (!resource)
14201                 return 0;
14202         MLX5_ASSERT(resource->action);
14203         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14204                                     &resource->entry);
14205 }
14206
14207 /**
14208  * Release the fate resource.
14209  *
14210  * @param dev
14211  *   Pointer to Ethernet device.
14212  * @param handle
14213  *   Pointer to mlx5_flow_handle.
14214  */
14215 static void
14216 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
14217                                struct mlx5_flow_handle *handle)
14218 {
14219         if (!handle->rix_fate)
14220                 return;
14221         switch (handle->fate_action) {
14222         case MLX5_FLOW_FATE_QUEUE:
14223                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
14224                         mlx5_hrxq_release(dev, handle->rix_hrxq);
14225                 break;
14226         case MLX5_FLOW_FATE_JUMP:
14227                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
14228                 break;
14229         case MLX5_FLOW_FATE_PORT_ID:
14230                 flow_dv_port_id_action_resource_release(dev,
14231                                 handle->rix_port_id_action);
14232                 break;
14233         default:
14234                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
14235                 break;
14236         }
14237         handle->rix_fate = 0;
14238 }
14239
14240 void
14241 flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
14242                          struct mlx5_list_entry *entry)
14243 {
14244         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
14245                                                               typeof(*resource),
14246                                                               entry);
14247         struct rte_eth_dev *dev = resource->dev;
14248         struct mlx5_priv *priv = dev->data->dev_private;
14249
14250         if (resource->verbs_action)
14251                 claim_zero(mlx5_flow_os_destroy_flow_action
14252                                                       (resource->verbs_action));
14253         if (resource->normal_path_tbl)
14254                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14255                                              resource->normal_path_tbl);
14256         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
14257         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
14258         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
14259 }
14260
14261 /**
14262  * Release a sample resource.
14263  *
14264  * @param dev
14265  *   Pointer to Ethernet device.
14266  * @param handle
14267  *   Pointer to mlx5_flow_handle.
14268  *
14269  * @return
14270  *   1 while a reference on it exists, 0 when freed.
14271  */
14272 static int
14273 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14274                                 struct mlx5_flow_handle *handle)
14275 {
14276         struct mlx5_priv *priv = dev->data->dev_private;
14277         struct mlx5_flow_dv_sample_resource *resource;
14278
14279         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14280                                   handle->dvh.rix_sample);
14281         if (!resource)
14282                 return 0;
14283         MLX5_ASSERT(resource->verbs_action);
14284         return mlx5_list_unregister(priv->sh->sample_action_list,
14285                                     &resource->entry);
14286 }
14287
14288 void
14289 flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
14290                              struct mlx5_list_entry *entry)
14291 {
14292         struct mlx5_flow_dv_dest_array_resource *resource =
14293                         container_of(entry, typeof(*resource), entry);
14294         struct rte_eth_dev *dev = resource->dev;
14295         struct mlx5_priv *priv = dev->data->dev_private;
14296         uint32_t i = 0;
14297
14298         MLX5_ASSERT(resource->action);
14299         if (resource->action)
14300                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
14301         for (; i < resource->num_of_dest; i++)
14302                 flow_dv_sample_sub_actions_release(dev,
14303                                                    &resource->sample_idx[i]);
14304         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
14305         DRV_LOG(DEBUG, "destination array resource %p: removed",
14306                 (void *)resource);
14307 }
14308
14309 /**
14310  * Release a destination array resource.
14311  *
14312  * @param dev
14313  *   Pointer to Ethernet device.
14314  * @param handle
14315  *   Pointer to mlx5_flow_handle.
14316  *
14317  * @return
14318  *   1 while a reference on it exists, 0 when freed.
14319  */
14320 static int
14321 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14322                                     struct mlx5_flow_handle *handle)
14323 {
14324         struct mlx5_priv *priv = dev->data->dev_private;
14325         struct mlx5_flow_dv_dest_array_resource *resource;
14326
14327         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14328                                   handle->dvh.rix_dest_array);
14329         if (!resource)
14330                 return 0;
14331         MLX5_ASSERT(resource->action);
14332         return mlx5_list_unregister(priv->sh->dest_array_list,
14333                                     &resource->entry);
14334 }
14335
14336 static void
14337 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14338 {
14339         struct mlx5_priv *priv = dev->data->dev_private;
14340         struct mlx5_dev_ctx_shared *sh = priv->sh;
14341         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14342                                 sh->geneve_tlv_option_resource;
14343         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14344         if (geneve_opt_resource) {
14345                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14346                                          __ATOMIC_RELAXED))) {
14347                         claim_zero(mlx5_devx_cmd_destroy
14348                                         (geneve_opt_resource->obj));
14349                         mlx5_free(sh->geneve_tlv_option_resource);
14350                         sh->geneve_tlv_option_resource = NULL;
14351                 }
14352         }
14353         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14354 }
14355
14356 /**
14357  * Remove the flow from the NIC but keep it in memory.
14358  * Lock free (mutex should be acquired by the caller).
14359  *
14360  * @param[in] dev
14361  *   Pointer to Ethernet device.
14362  * @param[in, out] flow
14363  *   Pointer to flow structure.
14364  */
14365 static void
14366 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14367 {
14368         struct mlx5_flow_handle *dh;
14369         uint32_t handle_idx;
14370         struct mlx5_priv *priv = dev->data->dev_private;
14371
14372         if (!flow)
14373                 return;
14374         handle_idx = flow->dev_handles;
14375         while (handle_idx) {
14376                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14377                                     handle_idx);
14378                 if (!dh)
14379                         return;
14380                 if (dh->drv_flow) {
14381                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14382                         dh->drv_flow = NULL;
14383                 }
14384                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14385                         flow_dv_fate_resource_release(dev, dh);
14386                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14387                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14388                 handle_idx = dh->next.next;
14389         }
14390 }
14391
14392 /**
14393  * Remove the flow from the NIC and from memory.
14394  * Lock free (mutex should be acquired by the caller).
14395  *
14396  * @param[in] dev
14397  *   Pointer to the Ethernet device structure.
14398  * @param[in, out] flow
14399  *   Pointer to flow structure.
14400  */
14401 static void
14402 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14403 {
14404         struct mlx5_flow_handle *dev_handle;
14405         struct mlx5_priv *priv = dev->data->dev_private;
14406         struct mlx5_flow_meter_info *fm = NULL;
14407         uint32_t srss = 0;
14408
14409         if (!flow)
14410                 return;
14411         flow_dv_remove(dev, flow);
14412         if (flow->counter) {
14413                 flow_dv_counter_free(dev, flow->counter);
14414                 flow->counter = 0;
14415         }
14416         if (flow->meter) {
14417                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14418                 if (fm)
14419                         mlx5_flow_meter_detach(priv, fm);
14420                 flow->meter = 0;
14421         }
14422         /* Keep the current age handling by default. */
14423         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14424                 flow_dv_aso_ct_release(dev, flow->ct, NULL);
14425         else if (flow->age)
14426                 flow_dv_aso_age_release(dev, flow->age);
14427         if (flow->geneve_tlv_option) {
14428                 flow_dv_geneve_tlv_option_resource_release(dev);
14429                 flow->geneve_tlv_option = 0;
14430         }
14431         while (flow->dev_handles) {
14432                 uint32_t tmp_idx = flow->dev_handles;
14433
14434                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14435                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14436                 if (!dev_handle)
14437                         return;
14438                 flow->dev_handles = dev_handle->next.next;
14439                 while (dev_handle->flex_item) {
14440                         int index = rte_bsf32(dev_handle->flex_item);
14441
14442                         mlx5_flex_release_index(dev, index);
14443                         dev_handle->flex_item &= ~RTE_BIT32(index);
14444                 }
14445                 if (dev_handle->dvh.matcher)
14446                         flow_dv_matcher_release(dev, dev_handle);
14447                 if (dev_handle->dvh.rix_sample)
14448                         flow_dv_sample_resource_release(dev, dev_handle);
14449                 if (dev_handle->dvh.rix_dest_array)
14450                         flow_dv_dest_array_resource_release(dev, dev_handle);
14451                 if (dev_handle->dvh.rix_encap_decap)
14452                         flow_dv_encap_decap_resource_release(dev,
14453                                 dev_handle->dvh.rix_encap_decap);
14454                 if (dev_handle->dvh.modify_hdr)
14455                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14456                 if (dev_handle->dvh.rix_push_vlan)
14457                         flow_dv_push_vlan_action_resource_release(dev,
14458                                                                   dev_handle);
14459                 if (dev_handle->dvh.rix_tag)
14460                         flow_dv_tag_release(dev,
14461                                             dev_handle->dvh.rix_tag);
14462                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14463                         flow_dv_fate_resource_release(dev, dev_handle);
14464                 else if (!srss)
14465                         srss = dev_handle->rix_srss;
14466                 if (fm && dev_handle->is_meter_flow_id &&
14467                     dev_handle->split_flow_id)
14468                         mlx5_ipool_free(fm->flow_ipool,
14469                                         dev_handle->split_flow_id);
14470                 else if (dev_handle->split_flow_id &&
14471                     !dev_handle->is_meter_flow_id)
14472                         mlx5_ipool_free(priv->sh->ipool
14473                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14474                                         dev_handle->split_flow_id);
14475                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14476                                 tmp_idx);
14477         }
14478         if (srss)
14479                 flow_dv_shared_rss_action_release(dev, srss);
14480 }
14481
14482 /**
14483  * Release array of hash RX queue objects.
14484  * Helper function.
14485  *
14486  * @param[in] dev
14487  *   Pointer to the Ethernet device structure.
14488  * @param[in, out] hrxqs
14489  *   Array of hash RX queue objects.
14490  *
14491  * @return
14492  *   Total number of references to hash RX queue objects in *hrxqs* array
14493  *   after this operation.
14494  */
14495 static int
14496 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14497                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14498 {
14499         size_t i;
14500         int remaining = 0;
14501
14502         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14503                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14504
14505                 if (!ret)
14506                         (*hrxqs)[i] = 0;
14507                 remaining += ret;
14508         }
14509         return remaining;
14510 }
14511
14512 /**
14513  * Release all hash RX queue objects representing shared RSS action.
14514  *
14515  * @param[in] dev
14516  *   Pointer to the Ethernet device structure.
14517  * @param[in, out] action
14518  *   Shared RSS action to remove hash RX queue objects from.
14519  *
14520  * @return
14521  *   Total number of references to hash RX queue objects stored in *action*
14522  *   after this operation.
14523  *   Expected to be 0 if no external references are held.
14524  */
14525 static int
14526 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14527                                  struct mlx5_shared_action_rss *shared_rss)
14528 {
14529         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14530 }
14531
14532 /**
14533  * Adjust the L3/L4 hash value of a pre-created shared RSS hrxq
14534  * according to user input.
14535  *
14536  * Only one hash value is available for each L3+L4 combination.
14537  * For example,
14538  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY and
14539  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can
14540  * share the same slot in mlx5_rss_hash_fields.
14541  *
14542  * @param[in] rss
14543  *   Pointer to the shared action RSS conf.
14544  * @param[in, out] hash_field
14545  *   Pointer to the hash_field value to be adjusted.
14546  *
14547  * @return
14548  *   void
14549  */
14550 static void
14551 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14552                                      uint64_t *hash_field)
14553 {
14554         uint64_t rss_types = rss->origin.types;
14555
14556         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14557         case MLX5_RSS_HASH_IPV4:
14558                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14559                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14560                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14561                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14562                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14563                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14564                         else
14565                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14566                 }
14567                 return;
14568         case MLX5_RSS_HASH_IPV6:
14569                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14570                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14571                         if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
14572                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14573                         else if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
14574                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14575                         else
14576                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14577                 }
14578                 return;
14579         case MLX5_RSS_HASH_IPV4_UDP:
14580                 /* fall-through. */
14581         case MLX5_RSS_HASH_IPV6_UDP:
14582                 if (rss_types & RTE_ETH_RSS_UDP) {
14583                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14584                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14585                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14586                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14587                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14588                         else
14589                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14590                 }
14591                 return;
14592         case MLX5_RSS_HASH_IPV4_TCP:
14593                 /* fall-through. */
14594         case MLX5_RSS_HASH_IPV6_TCP:
14595                 if (rss_types & RTE_ETH_RSS_TCP) {
14596                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14597                         if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
14598                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14599                         else if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
14600                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14601                         else
14602                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14603                 }
14604                 return;
14605         default:
14606                 return;
14607         }
14608 }
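/*
 * A worked example of the adjustment above, assuming a shared RSS action
 * created with types = RTE_ETH_RSS_IP | RTE_ETH_RSS_L3_DST_ONLY (the
 * variable names are illustrative only):
 *
 *	uint64_t hf = MLX5_RSS_HASH_IPV4;
 *
 *	__flow_dv_action_rss_l34_hash_adjust(rss, &hf);
 *	// hf is now IBV_RX_HASH_DST_IPV4, so the pre-created IPv4 hrxq
 *	// hashes on the destination address only, as the user requested.
 */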
14609
14610 /**
14611  * Set up the shared RSS action.
14612  * Prepare a set of hash RX queue objects sufficient to handle all valid
14613  * hash_fields combinations (see enum ibv_rx_hash_fields).
14614  *
14615  * @param[in] dev
14616  *   Pointer to the Ethernet device structure.
14617  * @param[in] action_idx
14618  *   Shared RSS action ipool index.
14619  * @param[in, out] action
14620  *   Partially initialized shared RSS action.
14621  * @param[out] error
14622  *   Perform verbose error reporting if not NULL. Initialized in case of
14623  *   error only.
14624  *
14625  * @return
14626  *   0 on success, otherwise negative errno value.
14627  */
14628 static int
14629 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14630                            uint32_t action_idx,
14631                            struct mlx5_shared_action_rss *shared_rss,
14632                            struct rte_flow_error *error)
14633 {
14634         struct mlx5_flow_rss_desc rss_desc = { 0 };
14635         size_t i;
14636         int err;
14637
14638         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14639                 return rte_flow_error_set(error, rte_errno,
14640                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14641                                           "cannot setup indirection table");
14642         }
14643         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14644         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14645         rss_desc.const_q = shared_rss->origin.queue;
14646         rss_desc.queue_num = shared_rss->origin.queue_num;
14647         /* Set non-zero value to indicate a shared RSS. */
14648         rss_desc.shared_rss = action_idx;
14649         rss_desc.ind_tbl = shared_rss->ind_tbl;
14650         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14651                 uint32_t hrxq_idx;
14652                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14653                 int tunnel = 0;
14654
14655                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14656                 if (shared_rss->origin.level > 1) {
14657                         hash_fields |= IBV_RX_HASH_INNER;
14658                         tunnel = 1;
14659                 }
14660                 rss_desc.tunnel = tunnel;
14661                 rss_desc.hash_fields = hash_fields;
14662                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14663                 if (!hrxq_idx) {
14664                         rte_flow_error_set
14665                                 (error, rte_errno,
14666                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14667                                  "cannot get hash queue");
14668                         goto error_hrxq_new;
14669                 }
14670                 err = __flow_dv_action_rss_hrxq_set
14671                         (shared_rss, hash_fields, hrxq_idx);
14672                 MLX5_ASSERT(!err);
14673         }
14674         return 0;
14675 error_hrxq_new:
14676         err = rte_errno;
14677         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14678         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14679                 shared_rss->ind_tbl = NULL;
14680         rte_errno = err;
14681         return -rte_errno;
14682 }
14683
14684 /**
14685  * Create shared RSS action.
14686  *
14687  * @param[in] dev
14688  *   Pointer to the Ethernet device structure.
14689  * @param[in] conf
14690  *   Shared action configuration.
14691  * @param[in] rss
14692  *   RSS action specification used to create shared action.
14693  * @param[out] error
14694  *   Perform verbose error reporting if not NULL. Initialized in case of
14695  *   error only.
14696  *
14697  * @return
14698  *   A valid shared action ID in case of success, 0 otherwise and
14699  *   rte_errno is set.
14700  */
14701 static uint32_t
14702 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14703                             const struct rte_flow_indir_action_conf *conf,
14704                             const struct rte_flow_action_rss *rss,
14705                             struct rte_flow_error *error)
14706 {
14707         struct mlx5_priv *priv = dev->data->dev_private;
14708         struct mlx5_shared_action_rss *shared_rss = NULL;
14709         void *queue = NULL;
14710         struct rte_flow_action_rss *origin;
14711         const uint8_t *rss_key;
14712         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14713         uint32_t idx;
14714
14715         RTE_SET_USED(conf);
14716         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14717                             0, SOCKET_ID_ANY);
14718         shared_rss = mlx5_ipool_zmalloc
14719                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14720         if (!shared_rss || !queue) {
14721                 rte_flow_error_set(error, ENOMEM,
14722                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14723                                    "cannot allocate resource memory");
14724                 goto error_rss_init;
14725         }
14726         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14727                 rte_flow_error_set(error, E2BIG,
14728                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14729                                    "rss action number out of range");
14730                 goto error_rss_init;
14731         }
14732         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14733                                           sizeof(*shared_rss->ind_tbl),
14734                                           0, SOCKET_ID_ANY);
14735         if (!shared_rss->ind_tbl) {
14736                 rte_flow_error_set(error, ENOMEM,
14737                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14738                                    "cannot allocate resource memory");
14739                 goto error_rss_init;
14740         }
14741         memcpy(queue, rss->queue, queue_size);
14742         shared_rss->ind_tbl->queues = queue;
14743         shared_rss->ind_tbl->queues_n = rss->queue_num;
14744         origin = &shared_rss->origin;
14745         origin->func = rss->func;
14746         origin->level = rss->level;
14747         /* RSS type 0 indicates default RSS type (RTE_ETH_RSS_IP). */
14748         origin->types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
14749         /* NULL RSS key indicates default RSS key. */
14750         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14751         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14752         origin->key = &shared_rss->key[0];
14753         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14754         origin->queue = queue;
14755         origin->queue_num = rss->queue_num;
14756         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14757                 goto error_rss_init;
14758         rte_spinlock_init(&shared_rss->action_rss_sl);
14759         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14760         rte_spinlock_lock(&priv->shared_act_sl);
14761         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14762                      &priv->rss_shared_actions, idx, shared_rss, next);
14763         rte_spinlock_unlock(&priv->shared_act_sl);
14764         return idx;
14765 error_rss_init:
14766         if (shared_rss) {
14767                 if (shared_rss->ind_tbl)
14768                         mlx5_free(shared_rss->ind_tbl);
14769                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14770                                 idx);
14771         }
14772         if (queue)
14773                 mlx5_free(queue);
14774         return 0;
14775 }
14776
14777 /**
14778  * Destroy the shared RSS action.
14779  * Release related hash RX queue objects.
14780  *
14781  * @param[in] dev
14782  *   Pointer to the Ethernet device structure.
14783  * @param[in] idx
14784  *   The shared RSS action object ID to be removed.
14785  * @param[out] error
14786  *   Perform verbose error reporting if not NULL. Initialized in case of
14787  *   error only.
14788  *
14789  * @return
14790  *   0 on success, otherwise negative errno value.
14791  */
14792 static int
14793 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14794                              struct rte_flow_error *error)
14795 {
14796         struct mlx5_priv *priv = dev->data->dev_private;
14797         struct mlx5_shared_action_rss *shared_rss =
14798             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14799         uint32_t old_refcnt = 1;
14800         int remaining;
14801         uint16_t *queue = NULL;
14802
14803         if (!shared_rss)
14804                 return rte_flow_error_set(error, EINVAL,
14805                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14806                                           "invalid shared action");
14807         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14808                                          0, 0, __ATOMIC_ACQUIRE,
14809                                          __ATOMIC_RELAXED))
14810                 return rte_flow_error_set(error, EBUSY,
14811                                           RTE_FLOW_ERROR_TYPE_ACTION,
14812                                           NULL,
14813                                           "shared rss has references");
14814         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14815         if (remaining)
14816                 return rte_flow_error_set(error, EBUSY,
14817                                           RTE_FLOW_ERROR_TYPE_ACTION,
14818                                           NULL,
14819                                           "shared rss hrxq has references");
14820         queue = shared_rss->ind_tbl->queues;
14821         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14822         if (remaining)
14823                 return rte_flow_error_set(error, EBUSY,
14824                                           RTE_FLOW_ERROR_TYPE_ACTION,
14825                                           NULL,
14826                                           "shared rss indirection table has"
14827                                           " references");
14828         mlx5_free(queue);
14829         rte_spinlock_lock(&priv->shared_act_sl);
14830         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14831                      &priv->rss_shared_actions, idx, shared_rss, next);
14832         rte_spinlock_unlock(&priv->shared_act_sl);
14833         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14834                         idx);
14835         return 0;
14836 }
14837
14838 /**
14839  * Create an indirect action, lock free
14840  * (mutex should be acquired by the caller).
14841  * Dispatcher for action type specific call.
14842  *
14843  * @param[in] dev
14844  *   Pointer to the Ethernet device structure.
14845  * @param[in] conf
14846  *   Shared action configuration.
14847  * @param[in] action
14848  *   Action specification used to create indirect action.
14849  * @param[out] error
14850  *   Perform verbose error reporting if not NULL. Initialized in case of
14851  *   error only.
14852  *
14853  * @return
14854  *   A valid shared action handle in case of success, NULL otherwise and
14855  *   rte_errno is set.
14856  */
14857 static struct rte_flow_action_handle *
14858 flow_dv_action_create(struct rte_eth_dev *dev,
14859                       const struct rte_flow_indir_action_conf *conf,
14860                       const struct rte_flow_action *action,
14861                       struct rte_flow_error *err)
14862 {
14863         struct mlx5_priv *priv = dev->data->dev_private;
14864         uint32_t age_idx = 0;
14865         uint32_t idx = 0;
14866         uint32_t ret = 0;
14867
14868         switch (action->type) {
14869         case RTE_FLOW_ACTION_TYPE_RSS:
14870                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14871                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14872                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14873                 break;
14874         case RTE_FLOW_ACTION_TYPE_AGE:
14875                 age_idx = flow_dv_aso_age_alloc(dev, err);
14876                 if (!age_idx) {
14877                         ret = -rte_errno;
14878                         break;
14879                 }
14880                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14881                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14882                 flow_dv_aso_age_params_init(dev, age_idx,
14883                                         ((const struct rte_flow_action_age *)
14884                                                 action->conf)->context ?
14885                                         ((const struct rte_flow_action_age *)
14886                                                 action->conf)->context :
14887                                         (void *)(uintptr_t)idx,
14888                                         ((const struct rte_flow_action_age *)
14889                                                 action->conf)->timeout);
14890                 ret = age_idx;
14891                 break;
14892         case RTE_FLOW_ACTION_TYPE_COUNT:
14893                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14894                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14895                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14896                 break;
14897         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14898                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14899                                                          err);
14900                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14901                 break;
14902         default:
14903                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14904                                    NULL, "action type not supported");
14905                 break;
14906         }
14907         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14908 }
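/*
 * The returned handle is not a pointer but an encoded index: the action
 * type lives in the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the
 * object index in the bits below (CT handles additionally embed the owner
 * port). A sketch of the decoding used by the dispatchers below:
 *
 *	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 *	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *	uint32_t idx = act_idx &
 *		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 */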
14909
14910 /**
14911  * Destroy the indirect action.
14912  * Release action related resources on the NIC and the memory.
14913  * Release action-related resources on the NIC and in memory.
14914  * Lock free (mutex should be acquired by the caller).
14915  *
14916  * @param[in] dev
14917  *   Pointer to the Ethernet device structure.
14918  * @param[in] handle
14919  *   The indirect action object handle to be removed.
14920  * @param[out] error
14921  *   Perform verbose error reporting if not NULL. Initialized in case of
14922  *   error only.
14923  *
14924  * @return
14925  *   0 on success, otherwise negative errno value.
14926  */
14927 static int
14928 flow_dv_action_destroy(struct rte_eth_dev *dev,
14929                        struct rte_flow_action_handle *handle,
14930                        struct rte_flow_error *error)
14931 {
14932         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14933         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14934         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14935         struct mlx5_flow_counter *cnt;
14936         uint32_t no_flow_refcnt = 1;
14937         int ret;
14938
14939         switch (type) {
14940         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14941                 return __flow_dv_action_rss_release(dev, idx, error);
14942         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14943                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14944                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14945                                                  &no_flow_refcnt, 1, false,
14946                                                  __ATOMIC_ACQUIRE,
14947                                                  __ATOMIC_RELAXED))
14948                         return rte_flow_error_set(error, EBUSY,
14949                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14950                                                   NULL,
14951                                                   "Indirect count action has references");
14952                 flow_dv_counter_free(dev, idx);
14953                 return 0;
14954         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14955                 ret = flow_dv_aso_age_release(dev, idx);
14956                 if (ret)
14957                         /*
14958                          * In this case, the last flow holding a reference
14959                          * will actually release the age action.
14960                          */
14961                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14962                                 " released with references %d.", idx, ret);
14963                 return 0;
14964         case MLX5_INDIRECT_ACTION_TYPE_CT:
14965                 ret = flow_dv_aso_ct_release(dev, idx, error);
14966                 if (ret < 0)
14967                         return ret;
14968                 if (ret > 0)
14969                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14970                                 "has references %d.", idx, ret);
14971                 return 0;
14972         default:
14973                 return rte_flow_error_set(error, ENOTSUP,
14974                                           RTE_FLOW_ERROR_TYPE_ACTION,
14975                                           NULL,
14976                                           "action type not supported");
14977         }
14978 }
14979
14980 /**
14981  * Update the shared RSS action configuration in place.
14982  *
14983  * @param[in] dev
14984  *   Pointer to the Ethernet device structure.
14985  * @param[in] idx
14986  *   The shared RSS action object ID to be updated.
14987  * @param[in] action_conf
14988  *   RSS action specification used to modify *shared_rss*.
14989  * @param[out] error
14990  *   Perform verbose error reporting if not NULL. Initialized in case of
14991  *   error only.
14992  *
14993  * @return
14994  *   0 on success, otherwise negative errno value.
14995  * @note Currently only update of the RSS queues is supported.
14996  */
14997 static int
14998 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14999                             const struct rte_flow_action_rss *action_conf,
15000                             struct rte_flow_error *error)
15001 {
15002         struct mlx5_priv *priv = dev->data->dev_private;
15003         struct mlx5_shared_action_rss *shared_rss =
15004             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
15005         int ret = 0;
15006         void *queue = NULL;
15007         uint16_t *queue_old = NULL;
15008         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
15009
15010         if (!shared_rss)
15011                 return rte_flow_error_set(error, EINVAL,
15012                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15013                                           "invalid shared action to update");
15014         if (priv->obj_ops.ind_table_modify == NULL)
15015                 return rte_flow_error_set(error, ENOTSUP,
15016                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15017                                           "cannot modify indirection table");
15018         queue = mlx5_malloc(MLX5_MEM_ZERO,
15019                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
15020                             0, SOCKET_ID_ANY);
15021         if (!queue)
15022                 return rte_flow_error_set(error, ENOMEM,
15023                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15024                                           NULL,
15025                                           "cannot allocate resource memory");
15026         memcpy(queue, action_conf->queue, queue_size);
15027         MLX5_ASSERT(shared_rss->ind_tbl);
15028         rte_spinlock_lock(&shared_rss->action_rss_sl);
15029         queue_old = shared_rss->ind_tbl->queues;
15030         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
15031                                         queue, action_conf->queue_num, true);
15032         if (ret) {
15033                 mlx5_free(queue);
15034                 ret = rte_flow_error_set(error, rte_errno,
15035                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15036                                           "cannot update indirection table");
15037         } else {
15038                 mlx5_free(queue_old);
15039                 shared_rss->origin.queue = queue;
15040                 shared_rss->origin.queue_num = action_conf->queue_num;
15041         }
15042         rte_spinlock_unlock(&shared_rss->action_rss_sl);
15043         return ret;
15044 }
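/*
 * This function is reached through the generic indirect-action API. A
 * minimal usage sketch from the application side (port_id, queues, handle
 * and error handling are placeholders):
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss_conf = {
 *		.queue = queues,
 *		.queue_num = RTE_DIM(queues),
 *	};
 *	struct rte_flow_action update = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss_conf,
 *	};
 *
 *	ret = rte_flow_action_handle_update(port_id, handle, &update, &error);
 */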
15045
15046 /**
15047  * Update the conntrack context or direction in place.
15048  * Context updates should be synchronized.
15049  *
15050  * @param[in] dev
15051  *   Pointer to the Ethernet device structure.
15052  * @param[in] idx
15053  *   The conntrack object ID to be updated.
15054  * @param[in] update
15055  *   Pointer to the structure of information to update.
15056  * @param[out] error
15057  *   Perform verbose error reporting if not NULL. Initialized in case of
15058  *   error only.
15059  *
15060  * @return
15061  *   0 on success, otherwise negative errno value.
15062  */
15063 static int
15064 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
15065                            const struct rte_flow_modify_conntrack *update,
15066                            struct rte_flow_error *error)
15067 {
15068         struct mlx5_priv *priv = dev->data->dev_private;
15069         struct mlx5_aso_ct_action *ct;
15070         const struct rte_flow_action_conntrack *new_prf;
15071         int ret = 0;
15072         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15073         uint32_t dev_idx;
15074
15075         if (PORT_ID(priv) != owner)
15076                 return rte_flow_error_set(error, EACCES,
15077                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15078                                           NULL,
15079                                           "CT object owned by another port");
15080         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15081         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15082         if (!ct->refcnt)
15083                 return rte_flow_error_set(error, ENOMEM,
15084                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15085                                           NULL,
15086                                           "CT object is inactive");
15087         new_prf = &update->new_ct;
15088         if (update->direction)
15089                 ct->is_original = !!new_prf->is_original_dir;
15090         if (update->state) {
15091                 /* Only validate the profile when it needs to be updated. */
15092                 ret = mlx5_validate_action_ct(dev, new_prf, error);
15093                 if (ret)
15094                         return ret;
15095                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
15096                 if (ret)
15097                         return rte_flow_error_set(error, EIO,
15098                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15099                                         NULL,
15100                                         "Failed to send CT context update WQE");
15101                 /* Block until ready or a failure. */
15102                 ret = mlx5_aso_ct_available(priv->sh, ct);
15103                 if (ret)
15104                         rte_flow_error_set(error, rte_errno,
15105                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15106                                            NULL,
15107                                            "Timed out waiting for the CT update");
15108         }
15109         return ret;
15110 }
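/*
 * A sketch of the update descriptor consumed above (new_profile is a
 * placeholder): setting .state pushes the new profile to hardware through
 * an ASO WQE, while .direction alone only flips ct->is_original locally.
 *
 *	struct rte_flow_modify_conntrack update = {
 *		.new_ct = new_profile,
 *		.direction = 1,
 *		.state = 1,
 *	};
 */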
15111
15112 /**
15113  * Update the shared action configuration in place, lock free
15114  * (mutex should be acquired by the caller).
15115  *
15116  * @param[in] dev
15117  *   Pointer to the Ethernet device structure.
15118  * @param[in] handle
15119  *   The indirect action object handle to be updated.
15120  * @param[in] update
15121  *   Action specification used to modify the action pointed to by *handle*.
15122  *   *update* could be of the same type as the action pointed to by the
15123  *   *handle* argument, or some other structure like a wrapper, depending on
15124  *   the indirect action type.
15125  * @param[out] error
15126  *   Perform verbose error reporting if not NULL. Initialized in case of
15127  *   error only.
15128  *
15129  * @return
15130  *   0 on success, otherwise negative errno value.
15131  */
15132 static int
15133 flow_dv_action_update(struct rte_eth_dev *dev,
15134                         struct rte_flow_action_handle *handle,
15135                         const void *update,
15136                         struct rte_flow_error *err)
15137 {
15138         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15139         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15140         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15141         const void *action_conf;
15142
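        /*
         * The handle is not dereferenced: the indirect action type is
         * encoded in its upper bits and the per-type object index in the
         * lower MLX5_INDIRECT_ACTION_TYPE_OFFSET bits, as decoded above.
         */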
15143         switch (type) {
15144         case MLX5_INDIRECT_ACTION_TYPE_RSS:
15145                 action_conf = ((const struct rte_flow_action *)update)->conf;
15146                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
15147         case MLX5_INDIRECT_ACTION_TYPE_CT:
15148                 return __flow_dv_action_ct_update(dev, idx, update, err);
15149         default:
15150                 return rte_flow_error_set(err, ENOTSUP,
15151                                           RTE_FLOW_ERROR_TYPE_ACTION,
15152                                           NULL,
15153                                           "action type update not supported");
15154         }
15155 }
15156
15157 /**
15158  * Destroy the meter sub policy table rules.
15159  * Lock free, (mutex should be acquired by caller).
15160  *
15161  * @param[in] dev
15162  *   Pointer to Ethernet device.
15163  * @param[in] sub_policy
15164  *   Pointer to meter sub policy table.
15165  */
15166 static void
15167 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
15168                              struct mlx5_flow_meter_sub_policy *sub_policy)
15169 {
15170         struct mlx5_priv *priv = dev->data->dev_private;
15171         struct mlx5_flow_tbl_data_entry *tbl;
15172         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
15173         struct mlx5_flow_meter_info *next_fm;
15174         struct mlx5_sub_policy_color_rule *color_rule;
15175         void *tmp;
15176         uint32_t i;
15177
15178         for (i = 0; i < RTE_COLORS; i++) {
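                /*
                 * A green action may chain to a next meter (hierarchy);
                 * look it up first so it can be detached after its color
                 * rules are destroyed below.
                 */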
15179                 next_fm = NULL;
15180                 if (i == RTE_COLOR_GREEN && policy &&
15181                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
15182                         next_fm = mlx5_flow_meter_find(priv,
15183                                         policy->act_cnt[i].next_mtr_id, NULL);
15184                 RTE_TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
15185                                    next_port, tmp) {
15186                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
15187                         tbl = container_of(color_rule->matcher->tbl,
15188                                            typeof(*tbl), tbl);
15189                         mlx5_list_unregister(tbl->matchers,
15190                                              &color_rule->matcher->entry);
15191                         TAILQ_REMOVE(&sub_policy->color_rules[i],
15192                                      color_rule, next_port);
15193                         mlx5_free(color_rule);
15194                         if (next_fm)
15195                                 mlx5_flow_meter_detach(priv, next_fm);
15196                 }
15197         }
15198         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15199                 if (sub_policy->rix_hrxq[i]) {
15200                         if (policy && !policy->is_hierarchy)
15201                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
15202                         sub_policy->rix_hrxq[i] = 0;
15203                 }
15204                 if (sub_policy->jump_tbl[i]) {
15205                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15206                                                      sub_policy->jump_tbl[i]);
15207                         sub_policy->jump_tbl[i] = NULL;
15208                 }
15209         }
15210         if (sub_policy->tbl_rsc) {
15211                 flow_dv_tbl_resource_release(MLX5_SH(dev),
15212                                              sub_policy->tbl_rsc);
15213                 sub_policy->tbl_rsc = NULL;
15214         }
15215 }
15216
15217 /**
15218  * Destroy policy rules, lock free,
15219  * (mutex should be acquired by caller).
15220  * Walks all domains and destroys the rules of each sub policy.
15221  *
15222  * @param[in] dev
15223  *   Pointer to the Ethernet device structure.
15224  * @param[in] mtr_policy
15225  *   Meter policy struct.
15226  */
15227 static void
15228 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15229                              struct mlx5_flow_meter_policy *mtr_policy)
15230 {
15231         uint32_t i, j;
15232         struct mlx5_flow_meter_sub_policy *sub_policy;
15233         uint16_t sub_policy_num;
15234
15235         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
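                /*
                 * sub_policy_num packs one MLX5_MTR_SUB_POLICY_NUM_MASK-wide
                 * counter per domain; extract this domain's sub policy count.
                 */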
15236                 sub_policy_num = (mtr_policy->sub_policy_num >>
15237                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15238                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15239                 for (j = 0; j < sub_policy_num; j++) {
15240                         sub_policy = mtr_policy->sub_policys[i][j];
15241                         if (sub_policy)
15242                                 __flow_dv_destroy_sub_policy_rules(dev,
15243                                                                    sub_policy);
15244                 }
15245         }
15246 }
15247
15248 /**
15249  * Destroy policy actions, lock free,
15250  * (mutex should be acquired by caller).
15251  * Releases the per-color actions according to their fate action type.
15252  *
15253  * @param[in] dev
15254  *   Pointer to the Ethernet device structure.
15255  * @param[in] mtr_policy
15256  *   Meter policy struct.
15257  */
15258 static void
15259 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15260                       struct mlx5_flow_meter_policy *mtr_policy)
15261 {
15262         struct rte_flow_action *rss_action;
15263         struct mlx5_flow_handle dev_handle;
15264         uint32_t i, j;
15265
15266         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15267                 if (mtr_policy->act_cnt[i].rix_mark) {
15268                         flow_dv_tag_release(dev,
15269                                 mtr_policy->act_cnt[i].rix_mark);
15270                         mtr_policy->act_cnt[i].rix_mark = 0;
15271                 }
15272                 if (mtr_policy->act_cnt[i].modify_hdr) {
15273                         dev_handle.dvh.modify_hdr =
15274                                 mtr_policy->act_cnt[i].modify_hdr;
15275                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15276                 }
15277                 switch (mtr_policy->act_cnt[i].fate_action) {
15278                 case MLX5_FLOW_FATE_SHARED_RSS:
15279                         rss_action = mtr_policy->act_cnt[i].rss;
15280                         mlx5_free(rss_action);
15281                         break;
15282                 case MLX5_FLOW_FATE_PORT_ID:
15283                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15284                                 flow_dv_port_id_action_resource_release(dev,
15285                                 mtr_policy->act_cnt[i].rix_port_id_action);
15286                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15287                         }
15288                         break;
15289                 case MLX5_FLOW_FATE_DROP:
15290                 case MLX5_FLOW_FATE_JUMP:
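                        /*
                         * Drop/jump fates reference table resources that are
                         * released elsewhere; only clear the cached pointers.
                         */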
15291                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15292                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15293                                                 NULL;
15294                         break;
15295                 default:
15296                         /* Queue action: nothing to release. */
15297                         break;
15298                 }
15299         }
15300         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15301                 mtr_policy->dr_drop_action[j] = NULL;
15302 }
15303
15304 /**
15305  * Create policy action per domain, lock free,
15306  * (mutex should be acquired by caller).
15307  * Dispatches on the type of each per-color action.
15308  *
15309  * @param[in] dev
15310  *   Pointer to the Ethernet device structure.
15311  * @param[in] mtr_policy
15312  *   Meter policy struct.
15313  * @param[in] actions
15314  *   Array of per-color action specifications used to create meter actions.
 * @param[in] domain
 *   Meter domain to create the policy actions for.
15315  * @param[out] error
15316  *   Perform verbose error reporting if not NULL. Initialized in case of
15317  *   error only.
15318  *
15319  * @return
15320  *   0 on success, otherwise negative errno value.
15321  */
15322 static int
15323 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15324                         struct mlx5_flow_meter_policy *mtr_policy,
15325                         const struct rte_flow_action *actions[RTE_COLORS],
15326                         enum mlx5_meter_domain domain,
15327                         struct rte_mtr_error *error)
15328 {
15329         struct mlx5_priv *priv = dev->data->dev_private;
15330         struct rte_flow_error flow_err;
15331         const struct rte_flow_action *act;
15332         uint64_t action_flags;
15333         struct mlx5_flow_handle dh;
15334         struct mlx5_flow dev_flow;
15335         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15336         int i, ret;
15337         uint8_t egress, transfer;
15338         struct mlx5_meter_policy_action_container *act_cnt = NULL;
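        /*
         * Scratch modify header resource; the union reserves room for the
         * maximum number of modification commands.
         */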
15339         union {
15340                 struct mlx5_flow_dv_modify_hdr_resource res;
15341                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15342                             sizeof(struct mlx5_modification_cmd) *
15343                             (MLX5_MAX_MODIFY_NUM + 1)];
15344         } mhdr_dummy;
15345         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15346
15347         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15348         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15349         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15350         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15351         memset(&port_id_action, 0,
15352                sizeof(struct mlx5_flow_dv_port_id_action_resource));
15353         memset(mhdr_res, 0, sizeof(*mhdr_res));
15354         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15355                                        (egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15356                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX);
15357         dev_flow.handle = &dh;
15358         dev_flow.dv.port_id_action = &port_id_action;
15359         dev_flow.external = true;
15360         for (i = 0; i < RTE_COLORS; i++) {
15361                 if (i < MLX5_MTR_RTE_COLORS)
15362                         act_cnt = &mtr_policy->act_cnt[i];
15363                 /* Skip action creation for colors marked as skipped. */
15364                 if ((i == RTE_COLOR_YELLOW && mtr_policy->skip_y) ||
15365                     (i == RTE_COLOR_GREEN && mtr_policy->skip_g))
15366                         continue;
15367                 action_flags = 0;
15368                 for (act = actions[i];
15369                      act && act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
15370                         switch (act->type) {
15371                         case RTE_FLOW_ACTION_TYPE_MARK:
15372                         {
15373                                 uint32_t tag_be = mlx5_flow_mark_set
15374                                         (((const struct rte_flow_action_mark *)
15375                                         (act->conf))->id);
15376
15377                                 if (i >= MLX5_MTR_RTE_COLORS)
15378                                         return -rte_mtr_error_set(error,
15379                                           ENOTSUP,
15380                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15381                                           NULL,
15382                                           "cannot create policy "
15383                                           "mark action for this color");
15384                                 dev_flow.handle->mark = 1;
15385                                 if (flow_dv_tag_resource_register(dev, tag_be,
15386                                                   &dev_flow, &flow_err))
15387                                         return -rte_mtr_error_set(error,
15388                                         ENOTSUP,
15389                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15390                                         NULL,
15391                                         "cannot setup policy mark action");
15392                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15393                                 act_cnt->rix_mark =
15394                                         dev_flow.handle->dvh.rix_tag;
15395                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15396                                 break;
15397                         }
15398                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15399                                 if (i >= MLX5_MTR_RTE_COLORS)
15400                                         return -rte_mtr_error_set(error,
15401                                           ENOTSUP,
15402                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15403                                           NULL,
15404                                           "cannot create policy "
15405                                           "set tag action for this color");
15406                                 if (flow_dv_convert_action_set_tag
15407                                 (dev, mhdr_res,
15408                                 (const struct rte_flow_action_set_tag *)
15409                                 act->conf,  &flow_err))
15410                                         return -rte_mtr_error_set(error,
15411                                         ENOTSUP,
15412                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15413                                         NULL, "cannot convert policy "
15414                                         "set tag action");
15415                                 if (!mhdr_res->actions_num)
15416                                         return -rte_mtr_error_set(error,
15417                                         ENOTSUP,
15418                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15419                                         NULL, "cannot find policy "
15420                                         "set tag action");
15421                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15422                                 break;
15423                         case RTE_FLOW_ACTION_TYPE_DROP:
15424                         {
15425                                 struct mlx5_flow_mtr_mng *mtrmng =
15426                                                 priv->sh->mtrmng;
15427                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15428
15429                                 /*
15430                                  * Create the drop table with
15431                                  * METER DROP level.
15432                                  */
15433                                 if (!mtrmng->drop_tbl[domain]) {
15434                                         mtrmng->drop_tbl[domain] =
15435                                         flow_dv_tbl_resource_get(dev,
15436                                         MLX5_FLOW_TABLE_LEVEL_METER,
15437                                         egress, transfer, false, NULL, 0,
15438                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15439                                         if (!mtrmng->drop_tbl[domain])
15440                                                 return -rte_mtr_error_set
15441                                         (error, ENOTSUP,
15442                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15443                                         NULL,
15444                                         "Failed to create meter drop table");
15445                                 }
15446                                 tbl_data = container_of
15447                                 (mtrmng->drop_tbl[domain],
15448                                 struct mlx5_flow_tbl_data_entry, tbl);
15449                                 if (i < MLX5_MTR_RTE_COLORS) {
15450                                         act_cnt->dr_jump_action[domain] =
15451                                                 tbl_data->jump.action;
15452                                         act_cnt->fate_action =
15453                                                 MLX5_FLOW_FATE_DROP;
15454                                 }
15455                                 if (i == RTE_COLOR_RED)
15456                                         mtr_policy->dr_drop_action[domain] =
15457                                                 tbl_data->jump.action;
15458                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15459                                 break;
15460                         }
15461                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15462                         {
15463                                 if (i >= MLX5_MTR_RTE_COLORS)
15464                                         return -rte_mtr_error_set(error,
15465                                         ENOTSUP,
15466                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15467                                         NULL, "cannot create policy "
15468                                         "fate queue for this color");
15469                                 act_cnt->queue =
15470                                 ((const struct rte_flow_action_queue *)
15471                                         (act->conf))->index;
15472                                 act_cnt->fate_action =
15473                                         MLX5_FLOW_FATE_QUEUE;
15474                                 dev_flow.handle->fate_action =
15475                                         MLX5_FLOW_FATE_QUEUE;
15476                                 mtr_policy->is_queue = 1;
15477                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15478                                 break;
15479                         }
15480                         case RTE_FLOW_ACTION_TYPE_RSS:
15481                         {
15482                                 int rss_size;
15483
15484                                 if (i >= MLX5_MTR_RTE_COLORS)
15485                                         return -rte_mtr_error_set(error,
15486                                           ENOTSUP,
15487                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15488                                           NULL,
15489                                           "cannot create policy "
15490                                           "rss action for this color");
15491                                 /*
15492                                  * Save RSS conf into policy struct
15493                                  * for translate stage.
15494                                  */
15495                                 rss_size = (int)rte_flow_conv
15496                                         (RTE_FLOW_CONV_OP_ACTION,
15497                                         NULL, 0, act, &flow_err);
15498                                 if (rss_size <= 0)
15499                                         return -rte_mtr_error_set(error,
15500                                           ENOTSUP,
15501                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15502                                           NULL, "Get the wrong "
15503                                           "rss action struct size");
15504                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15505                                                 rss_size, 0, SOCKET_ID_ANY);
15506                                 if (!act_cnt->rss)
15507                                         return -rte_mtr_error_set(error,
15508                                           ENOTSUP,
15509                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15510                                           NULL,
15511                                           "Fail to malloc rss action memory");
15512                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15513                                         act_cnt->rss, rss_size,
15514                                         act, &flow_err);
15515                                 if (ret < 0)
15516                                         return -rte_mtr_error_set(error,
15517                                           ENOTSUP,
15518                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15519                                           NULL, "Fail to save "
15520                                           "rss action into policy struct");
15521                                 act_cnt->fate_action =
15522                                         MLX5_FLOW_FATE_SHARED_RSS;
15523                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15524                                 break;
15525                         }
15526                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15527                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
15528                         {
15529                                 struct mlx5_flow_dv_port_id_action_resource
15530                                         port_id_resource;
15531                                 uint32_t port_id = 0;
15532
15533                                 if (i >= MLX5_MTR_RTE_COLORS)
15534                                         return -rte_mtr_error_set(error,
15535                                         ENOTSUP,
15536                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15537                                         NULL, "cannot create policy "
15538                                         "port action for this color");
15539                                 memset(&port_id_resource, 0,
15540                                         sizeof(port_id_resource));
15541                                 if (flow_dv_translate_action_port_id(dev, act,
15542                                                 &port_id, &flow_err))
15543                                         return -rte_mtr_error_set(error,
15544                                         ENOTSUP,
15545                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15546                                         NULL, "cannot translate "
15547                                         "policy port action");
15548                                 port_id_resource.port_id = port_id;
15549                                 if (flow_dv_port_id_action_resource_register
15550                                         (dev, &port_id_resource,
15551                                         &dev_flow, &flow_err))
15552                                         return -rte_mtr_error_set(error,
15553                                         ENOTSUP,
15554                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15555                                         NULL, "cannot setup "
15556                                         "policy port action");
15557                                 act_cnt->rix_port_id_action =
15558                                         dev_flow.handle->rix_port_id_action;
15559                                 act_cnt->fate_action =
15560                                         MLX5_FLOW_FATE_PORT_ID;
15561                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15562                                 break;
15563                         }
15564                         case RTE_FLOW_ACTION_TYPE_JUMP:
15565                         {
15566                                 uint32_t jump_group = 0;
15567                                 uint32_t table = 0;
15568                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15569                                 struct flow_grp_info grp_info = {
15570                                         .external = !!dev_flow.external,
15571                                         .transfer = !!transfer,
15572                                         .fdb_def_rule = !!priv->fdb_def_rule,
15573                                         .std_tbl_fix = 0,
15574                                         .skip_scale = dev_flow.skip_scale &
15575                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15576                                 };
15577                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15578                                         mtr_policy->sub_policys[domain][0];
15579
15580                                 if (i >= MLX5_MTR_RTE_COLORS)
15581                                         return -rte_mtr_error_set(error,
15582                                           ENOTSUP,
15583                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15584                                           NULL,
15585                                           "cannot create policy "
15586                                           "jump action for this color");
15587                                 jump_group =
15588                                 ((const struct rte_flow_action_jump *)
15589                                                         act->conf)->group;
15590                                 if (mlx5_flow_group_to_table(dev, NULL,
15591                                                        jump_group,
15592                                                        &table,
15593                                                        &grp_info, &flow_err))
15594                                         return -rte_mtr_error_set(error,
15595                                         ENOTSUP,
15596                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15597                                         NULL, "cannot setup "
15598                                         "policy jump action");
15599                                 sub_policy->jump_tbl[i] =
15600                                 flow_dv_tbl_resource_get(dev,
15601                                         table, egress,
15602                                         transfer,
15603                                         !!dev_flow.external,
15604                                         NULL, jump_group, 0,
15605                                         0, &flow_err);
15606                                 if (!sub_policy->jump_tbl[i])
15607                                         return -rte_mtr_error_set(error,
15609                                         ENOTSUP,
15610                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15611                                         NULL, "cannot create jump action.");
15612                                 tbl_data = container_of
15613                                 (sub_policy->jump_tbl[i],
15614                                 struct mlx5_flow_tbl_data_entry, tbl);
15615                                 act_cnt->dr_jump_action[domain] =
15616                                         tbl_data->jump.action;
15617                                 act_cnt->fate_action =
15618                                         MLX5_FLOW_FATE_JUMP;
15619                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15620                                 break;
15621                         }
15622                         /*
15623                          * No need to check meter hierarchy for Y or R colors
15624                          * here since it is done in the validation stage.
15625                          */
15626                         case RTE_FLOW_ACTION_TYPE_METER:
15627                         {
15628                                 const struct rte_flow_action_meter *mtr;
15629                                 struct mlx5_flow_meter_info *next_fm;
15630                                 struct mlx5_flow_meter_policy *next_policy;
15631                                 struct rte_flow_action tag_action;
15632                                 struct mlx5_rte_flow_action_set_tag set_tag;
15633                                 uint32_t next_mtr_idx = 0;
15634
15635                                 mtr = act->conf;
15636                                 next_fm = mlx5_flow_meter_find(priv,
15637                                                         mtr->mtr_id,
15638                                                         &next_mtr_idx);
15639                                 if (!next_fm)
15640                                         return -rte_mtr_error_set(error, EINVAL,
15641                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15642                                                 "Fail to find next meter.");
15643                                 if (next_fm->def_policy)
15644                                         return -rte_mtr_error_set(error, EINVAL,
15645                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15646                                 "Hierarchy only supports termination meter.");
15647                                 next_policy = mlx5_flow_meter_policy_find(dev,
15648                                                 next_fm->policy_id, NULL);
15649                                 MLX5_ASSERT(next_policy);
15650                                 if (next_fm->drop_cnt) {
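                                        /*
                                         * Carry the next meter index in the
                                         * MTR_ID register so the drop counter
                                         * of the next meter can be matched
                                         * when a packet is dropped.
                                         */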
15651                                         set_tag.id =
15652                                                 (enum modify_reg)
15653                                                 mlx5_flow_get_reg_id(dev,
15654                                                 MLX5_MTR_ID,
15655                                                 0,
15656                                                 (struct rte_flow_error *)error);
15657                                         set_tag.offset = (priv->mtr_reg_share ?
15658                                                 MLX5_MTR_COLOR_BITS : 0);
15659                                         set_tag.length = (priv->mtr_reg_share ?
15660                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15661                                                MLX5_REG_BITS);
15662                                         set_tag.data = next_mtr_idx;
15663                                         tag_action.type =
15664                                                 (enum rte_flow_action_type)
15665                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15666                                         tag_action.conf = &set_tag;
15667                                         if (flow_dv_convert_action_set_reg
15668                                                 (mhdr_res, &tag_action,
15669                                                 (struct rte_flow_error *)error))
15670                                                 return -rte_errno;
15671                                         action_flags |=
15672                                                 MLX5_FLOW_ACTION_SET_TAG;
15673                                 }
15674                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15675                                 act_cnt->next_mtr_id = next_fm->meter_id;
15676                                 act_cnt->next_sub_policy = NULL;
15677                                 mtr_policy->is_hierarchy = 1;
15678                                 mtr_policy->dev = next_policy->dev;
15679                                 action_flags |=
15680                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15681                                 break;
15682                         }
15683                         default:
15684                                 return -rte_mtr_error_set(error, ENOTSUP,
15685                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15686                                           NULL, "action type not supported");
15687                         }
15688                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15689                                 /* Create the modify header action if needed. */
15690                                 dev_flow.dv.group = 1;
15691                                 if (flow_dv_modify_hdr_resource_register
15692                                         (dev, mhdr_res, &dev_flow, &flow_err))
15693                                         return -rte_mtr_error_set(error,
15694                                                 ENOTSUP,
15695                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15696                                                 NULL, "cannot register policy "
15697                                                 "set tag action");
15698                                 act_cnt->modify_hdr =
15699                                         dev_flow.handle->dvh.modify_hdr;
15700                         }
15701                 }
15702         }
15703         return 0;
15704 }
15705
15706 /**
15707  * Create the policy actions for all meter domains, lock free,
15708  * (mutex should be acquired by caller).
15709  * Dispatches the per-domain creation to the domain-specific handler.
15710  *
15711  * @param[in] dev
15712  *   Pointer to the Ethernet device structure.
15713  * @param[in] mtr_policy
15714  *   Meter policy struct.
15715  * @param[in] actions
15716  *   Array of per-color action specifications used to create meter actions.
15717  * @param[out] error
15718  *   Perform verbose error reporting if not NULL. Initialized in case of
15719  *   error only.
15720  *
15721  * @return
15722  *   0 on success, otherwise negative errno value.
15723  */
15724 static int
15725 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15726                       struct mlx5_flow_meter_policy *mtr_policy,
15727                       const struct rte_flow_action *actions[RTE_COLORS],
15728                       struct rte_mtr_error *error)
15729 {
15730         int ret, i;
15731         uint16_t sub_policy_num;
15732
15733         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15734                 sub_policy_num = (mtr_policy->sub_policy_num >>
15735                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15736                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15737                 if (sub_policy_num) {
15738                         ret = __flow_dv_create_domain_policy_acts(dev,
15739                                 mtr_policy, actions,
15740                                 (enum mlx5_meter_domain)i, error);
15741                         /* Resource cleanup is done at the caller level. */
15742                         if (ret)
15743                                 return ret;
15744                 }
15745         }
15746         return 0;
15747 }
15748
15749 /**
15750  * Query a DV flow rule for its statistics via DevX.
15751  *
15752  * @param[in] dev
15753  *   Pointer to Ethernet device.
15754  * @param[in] cnt_idx
15755  *   Index to the flow counter.
15756  * @param[out] data
15757  *   Data retrieved by the query.
15758  * @param[out] error
15759  *   Perform verbose error reporting if not NULL.
15760  *
15761  * @return
15762  *   0 on success, a negative errno value otherwise and rte_errno is set.
15763  */
15764 int
15765 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15766                     struct rte_flow_error *error)
15767 {
15768         struct mlx5_priv *priv = dev->data->dev_private;
15769         struct rte_flow_query_count *qc = data;
15770
15771         if (!priv->sh->devx)
15772                 return rte_flow_error_set(error, ENOTSUP,
15773                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15774                                           NULL,
15775                                           "counters are not supported");
15776         if (cnt_idx) {
15777                 uint64_t pkts, bytes;
15778                 struct mlx5_flow_counter *cnt;
15779                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15780
15781                 if (err)
15782                         return rte_flow_error_set(error, -err,
15783                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15784                                         NULL, "cannot read counters");
15785                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
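                /*
                 * Device counters are cumulative: report the delta since the
                 * last reset and, if requested, move the baseline forward.
                 */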
15786                 qc->hits_set = 1;
15787                 qc->bytes_set = 1;
15788                 qc->hits = pkts - cnt->hits;
15789                 qc->bytes = bytes - cnt->bytes;
15790                 if (qc->reset) {
15791                         cnt->hits = pkts;
15792                         cnt->bytes = bytes;
15793                 }
15794                 return 0;
15795         }
15796         return rte_flow_error_set(error, EINVAL,
15797                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15798                                   NULL,
15799                                   "counters are not available");
15800 }
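
/*
 * A minimal usage sketch (illustration only, not part of this driver):
 * the path above is reached through the generic rte_flow_query() API on
 * a flow created with a COUNT action. The port_id and the flow handle
 * are assumed to exist already.
 *
 *	struct rte_flow_query_count cnt = { .reset = 1 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (!rte_flow_query(port_id, flow, &count_action, &cnt, &err))
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       cnt.hits, cnt.bytes);
 */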
15801
15803 /**
15804  * Query counter's action pointer for a DV flow rule via DevX.
15805  *
15806  * @param[in] dev
15807  *   Pointer to Ethernet device.
15808  * @param[in] cnt_idx
15809  *   Index to the flow counter.
15810  * @param[out] action_ptr
15811  *   Action pointer for counter.
15812  * @param[out] error
15813  *   Perform verbose error reporting if not NULL.
15814  *
15815  * @return
15816  *   0 on success, a negative errno value otherwise and rte_errno is set.
15817  */
15818 int
15819 flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
15820         void **action_ptr, struct rte_flow_error *error)
15821 {
15822         struct mlx5_priv *priv = dev->data->dev_private;
15823
15824         if (!priv->sh->devx || !action_ptr)
15825                 return rte_flow_error_set(error, ENOTSUP,
15826                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15827                                           NULL,
15828                                           "counters are not supported");
15829
15830         if (cnt_idx) {
15831                 struct mlx5_flow_counter *cnt =
15832                         flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15833                 if (cnt) {
15834                         *action_ptr = cnt->action;
15835                         return 0;
15836                 }
15837         }
15838         return rte_flow_error_set(error, EINVAL,
15839                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15840                                   NULL,
15841                                   "counters are not available");
15842 }
15843
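/**
 * Query an indirect action.
 *
 * Dispatches on the action type encoded in the handle; AGE, COUNT and CT
 * objects can be queried.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] handle
 *   The indirect action object handle to be queried.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */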
15844 static int
15845 flow_dv_action_query(struct rte_eth_dev *dev,
15846                      const struct rte_flow_action_handle *handle, void *data,
15847                      struct rte_flow_error *error)
15848 {
15849         struct mlx5_age_param *age_param;
15850         struct rte_flow_query_age *resp;
15851         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15852         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15853         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15854         struct mlx5_priv *priv = dev->data->dev_private;
15855         struct mlx5_aso_ct_action *ct;
15856         uint16_t owner;
15857         uint32_t dev_idx;
15858
15859         switch (type) {
15860         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15861                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15862                 resp = data;
15863                 resp->aged = __atomic_load_n(&age_param->state,
15864                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15865                                                                           1 : 0;
15866                 resp->sec_since_last_hit_valid = !resp->aged;
15867                 if (resp->sec_since_last_hit_valid)
15868                         resp->sec_since_last_hit = __atomic_load_n
15869                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15870                 return 0;
15871         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15872                 return flow_dv_query_count(dev, idx, data, error);
15873         case MLX5_INDIRECT_ACTION_TYPE_CT:
15874                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15875                 if (owner != PORT_ID(priv))
15876                         return rte_flow_error_set(error, EACCES,
15877                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15878                                         NULL,
15879                                         "CT object owned by another port");
15880                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15881                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15882                 MLX5_ASSERT(ct);
15883                 if (!ct->refcnt)
15884                         return rte_flow_error_set(error, EFAULT,
15885                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15886                                         NULL,
15887                                         "CT object is inactive");
15888                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15889                                                         ct->peer;
15890                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15891                                                         ct->is_original;
15892                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15893                         return rte_flow_error_set(error, EIO,
15894                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15895                                         NULL,
15896                                         "Failed to query CT context");
15897                 return 0;
15898         default:
15899                 return rte_flow_error_set(error, ENOTSUP,
15900                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15901                                           "action type query not supported");
15902         }
15903 }
15904
15905 /**
15906  * Query a flow rule AGE action for aging information.
15907  *
15908  * @param[in] dev
15909  *   Pointer to Ethernet device.
15910  * @param[in] flow
15911  *   Pointer to the sub flow.
15912  * @param[out] data
15913  *   Data retrieved by the query.
15914  * @param[out] error
15915  *   Perform verbose error reporting if not NULL.
15916  *
15917  * @return
15918  *   0 on success, a negative errno value otherwise and rte_errno is set.
15919  */
15920 static int
15921 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15922                   void *data, struct rte_flow_error *error)
15923 {
15924         struct rte_flow_query_age *resp = data;
15925         struct mlx5_age_param *age_param;
15926
15927         if (flow->age) {
15928                 struct mlx5_aso_age_action *act =
15929                                      flow_aso_age_get_by_idx(dev, flow->age);
15930
15931                 age_param = &act->age_params;
15932         } else if (flow->counter) {
15933                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15934
15935                 if (!age_param || !age_param->timeout)
15936                         return rte_flow_error_set
15937                                         (error, EINVAL,
15938                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15939                                          NULL, "cannot read age data");
15940         } else {
15941                 return rte_flow_error_set(error, EINVAL,
15942                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15943                                           NULL, "age data not available");
15944         }
15945         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15946                                      AGE_TMOUT ? 1 : 0;
15947         resp->sec_since_last_hit_valid = !resp->aged;
15948         if (resp->sec_since_last_hit_valid)
15949                 resp->sec_since_last_hit = __atomic_load_n
15950                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15951         return 0;
15952 }
15953
15954 /**
15955  * Query a flow.
15956  *
15957  * @see rte_flow_query()
15958  * @see rte_flow_ops
15959  */
15960 static int
15961 flow_dv_query(struct rte_eth_dev *dev,
15962               struct rte_flow *flow,
15963               const struct rte_flow_action *actions,
15964               void *data,
15965               struct rte_flow_error *error)
15966 {
15967         int ret = -EINVAL;
15968
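        /* Only COUNT and AGE actions can be queried on a DV flow rule. */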
15969         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15970                 switch (actions->type) {
15971                 case RTE_FLOW_ACTION_TYPE_VOID:
15972                         break;
15973                 case RTE_FLOW_ACTION_TYPE_COUNT:
15974                         ret = flow_dv_query_count(dev, flow->counter, data,
15975                                                   error);
15976                         break;
15977                 case RTE_FLOW_ACTION_TYPE_AGE:
15978                         ret = flow_dv_query_age(dev, flow, data, error);
15979                         break;
15980                 default:
15981                         return rte_flow_error_set(error, ENOTSUP,
15982                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15983                                                   actions,
15984                                                   "action not supported");
15985                 }
15986         }
15987         return ret;
15988 }
15989
15990 /**
15991  * Destroy the per-domain meter drop rules.
15992  * Lock free, (mutex should be acquired by caller).
15993  *
15994  * @param[in] dev
15995  *   Pointer to Ethernet device.
15996  * @param[in] fm
15997  *   Pointer to the flow meter info.
15998  */
15999 static void
16000 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
16001                         struct mlx5_flow_meter_info *fm)
16002 {
16003         struct mlx5_priv *priv = dev->data->dev_private;
16004         int i;
16005
16006         if (!fm || !priv->config.dv_flow_en)
16007                 return;
16008         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16009                 if (fm->drop_rule[i]) {
16010                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
16011                         fm->drop_rule[i] = NULL;
16012                 }
16013         }
16014 }
16015
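/**
 * Destroy the global meter drop tables, matchers and default rules.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */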
16016 static void
16017 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
16018 {
16019         struct mlx5_priv *priv = dev->data->dev_private;
16020         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16021         struct mlx5_flow_tbl_data_entry *tbl;
16022         int i, j;
16023
16024         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16025                 if (mtrmng->def_rule[i]) {
16026                         claim_zero(mlx5_flow_os_destroy_flow
16027                                         (mtrmng->def_rule[i]));
16028                         mtrmng->def_rule[i] = NULL;
16029                 }
16030                 if (mtrmng->def_matcher[i]) {
16031                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
16032                                 struct mlx5_flow_tbl_data_entry, tbl);
16033                         mlx5_list_unregister(tbl->matchers,
16034                                              &mtrmng->def_matcher[i]->entry);
16035                         mtrmng->def_matcher[i] = NULL;
16036                 }
16037                 for (j = 0; j < MLX5_REG_BITS; j++) {
16038                         if (mtrmng->drop_matcher[i][j]) {
16039                                 tbl =
16040                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
16041                                              struct mlx5_flow_tbl_data_entry,
16042                                              tbl);
16043                                 mlx5_list_unregister(tbl->matchers,
16044                                             &mtrmng->drop_matcher[i][j]->entry);
16045                                 mtrmng->drop_matcher[i][j] = NULL;
16046                         }
16047                 }
16048                 if (mtrmng->drop_tbl[i]) {
16049                         flow_dv_tbl_resource_release(MLX5_SH(dev),
16050                                 mtrmng->drop_tbl[i]);
16051                         mtrmng->drop_tbl[i] = NULL;
16052                 }
16053         }
16054 }
16055
16056 /* Number of meter flow actions: count and jump, or count and drop. */
16057 #define METER_ACTIONS 2
16058
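/**
 * Destroy the default meter policy of a single domain.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] domain
 *   Meter domain of the default policy to destroy.
 */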
16059 static void
16060 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
16061                                     enum mlx5_meter_domain domain)
16062 {
16063         struct mlx5_priv *priv = dev->data->dev_private;
16064         struct mlx5_flow_meter_def_policy *def_policy =
16065                         priv->sh->mtrmng->def_policy[domain];
16066
16067         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
16068         mlx5_free(def_policy);
16069         priv->sh->mtrmng->def_policy[domain] = NULL;
16070 }
16071
16072 /**
16073  * Destroy the default policy table set.
16074  *
16075  * @param[in] dev
16076  *   Pointer to Ethernet device.
16077  */
16078 static void
16079 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
16080 {
16081         struct mlx5_priv *priv = dev->data->dev_private;
16082         int i;
16083
16084         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
16085                 if (priv->sh->mtrmng->def_policy[i])
16086                         __flow_dv_destroy_domain_def_policy(dev,
16087                                         (enum mlx5_meter_domain)i);
16088         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
16089 }
16090
16091 static int
16092 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
16093                         uint32_t color_reg_c_idx,
16094                         enum rte_color color, void *matcher_object,
16095                         int actions_n, void *actions,
16096                         bool match_src_port, const struct rte_flow_item *item,
16097                         void **rule, const struct rte_flow_attr *attr)
16098 {
16099         int ret;
16100         struct mlx5_flow_dv_match_params value = {
16101                 .size = sizeof(value.buf),
16102         };
16103         struct mlx5_flow_dv_match_params matcher = {
16104                 .size = sizeof(matcher.buf),
16105         };
16106         struct mlx5_priv *priv = dev->data->dev_private;
16107         uint8_t misc_mask;
16108
16109         if (match_src_port && (priv->representor || priv->master)) {
16110                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
16111                                                    value.buf, item, attr)) {
16112                         DRV_LOG(ERR, "Failed to create meter policy%d flow's"
16113                                 " value with port.", color);
16114                         return -1;
16115                 }
16116         }
16117         flow_dv_match_meta_reg(matcher.buf, value.buf,
16118                                (enum modify_reg)color_reg_c_idx,
16119                                rte_col_2_mlx5_col(color), UINT32_MAX);
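        /* Trim the match value buffer to the enabled misc parameters only. */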
16120         misc_mask = flow_dv_matcher_enable(value.buf);
16121         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16122         ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
16123                                        actions_n, actions, rule);
16124         if (ret) {
16125                 DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
16126                 return -1;
16127         }
16128         return 0;
16129 }
16130
16131 static int
16132 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
16133                         uint32_t color_reg_c_idx,
16134                         uint16_t priority,
16135                         struct mlx5_flow_meter_sub_policy *sub_policy,
16136                         const struct rte_flow_attr *attr,
16137                         bool match_src_port,
16138                         const struct rte_flow_item *item,
16139                         struct mlx5_flow_dv_matcher **policy_matcher,
16140                         struct rte_flow_error *error)
16141 {
16142         struct mlx5_list_entry *entry;
16143         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
16144         struct mlx5_flow_dv_matcher matcher = {
16145                 .mask = {
16146                         .size = sizeof(matcher.mask.buf),
16147                 },
16148                 .tbl = tbl_rsc,
16149         };
16150         struct mlx5_flow_dv_match_params value = {
16151                 .size = sizeof(value.buf),
16152         };
16153         struct mlx5_flow_cb_ctx ctx = {
16154                 .error = error,
16155                 .data = &matcher,
16156         };
16157         struct mlx5_flow_tbl_data_entry *tbl_data;
16158         struct mlx5_priv *priv = dev->data->dev_private;
16159         const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
16160
16161         if (match_src_port && (priv->representor || priv->master)) {
16162                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
16163                                                    value.buf, item, attr)) {
16164                         DRV_LOG(ERR, "Failed to register meter policy%d matcher"
16165                                 " with port.", priority);
16166                         return -1;
16167                 }
16168         }
16169         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
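        /* Only matchers below the red priority match on the color register. */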
16170         if (priority < RTE_COLOR_RED)
16171                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16172                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
16173         matcher.priority = priority;
16174         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
16175                                     matcher.mask.size);
16176         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16177         if (!entry) {
16178                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
16179                 return -1;
16180         }
16181         *policy_matcher =
16182                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
16183         return 0;
16184 }
16185
16186 /**
16187  * Create the policy rules per domain.
16188  *
16189  * @param[in] dev
16190  *   Pointer to Ethernet device.
16191  * @param[in] sub_policy
16192  *   Pointer to sub policy table.
16193  * @param[in] egress
16194  *   Direction of the table.
16195  * @param[in] transfer
16196  *   E-Switch or NIC flow.
 * @param[in] match_src_port
 *   Match the flow source port if true.
16197  * @param[in] acts
16198  *   Pointer to policy action list per color.
16199  *
16200  * @return
16201  *   0 on success, -1 otherwise.
16202  */
16203 static int
16204 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
16205                 struct mlx5_flow_meter_sub_policy *sub_policy,
16206                 uint8_t egress, uint8_t transfer, bool match_src_port,
16207                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
16208 {
16209         struct mlx5_priv *priv = dev->data->dev_private;
16210         struct rte_flow_error flow_err;
16211         uint32_t color_reg_c_idx;
16212         struct rte_flow_attr attr = {
16213                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16214                 .priority = 0,
16215                 .ingress = 0,
16216                 .egress = !!egress,
16217                 .transfer = !!transfer,
16218                 .reserved = 0,
16219         };
16220         int i;
16221         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
16222         struct mlx5_sub_policy_color_rule *color_rule;
16223         bool svport_match;
16224         struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
16225
16226         if (ret < 0)
16227                 return -1;
16228         /* Create policy table with POLICY level. */
16229         if (!sub_policy->tbl_rsc)
16230                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
16231                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
16232                                 egress, transfer, false, NULL, 0, 0,
16233                                 sub_policy->idx, &flow_err);
16234         if (!sub_policy->tbl_rsc) {
16235                 DRV_LOG(ERR,
16236                         "Failed to create meter sub policy table.");
16237                 return -1;
16238         }
16239         /* Prepare matchers. */
16240         color_reg_c_idx = ret;
16241         for (i = 0; i < RTE_COLORS; i++) {
16242                 TAILQ_INIT(&sub_policy->color_rules[i]);
16243                 if (!acts[i].actions_n)
16244                         continue;
16245                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16246                                 sizeof(struct mlx5_sub_policy_color_rule),
16247                                 0, SOCKET_ID_ANY);
16248                 if (!color_rule) {
16249                         DRV_LOG(ERR, "No memory to create color rule.");
16250                         goto err_exit;
16251                 }
16252                 tmp_rules[i] = color_rule;
16253                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16254                                   color_rule, next_port);
16255                 color_rule->src_port = priv->representor_id;
16256                 /* The attribute priority is not used here. */
16257                 attr.priority = i;
16258                 /* Create matchers for colors. */
16259                 svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
16260                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16261                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
16262                                 &attr, svport_match, NULL,
16263                                 &color_rule->matcher, &flow_err)) {
16264                         DRV_LOG(ERR, "Failed to create color%u matcher.", i);
16265                         goto err_exit;
16266                 }
16267                 /* Create flow, matching color. */
16268                 if (__flow_dv_create_policy_flow(dev,
16269                                 color_reg_c_idx, (enum rte_color)i,
16270                                 color_rule->matcher->matcher_object,
16271                                 acts[i].actions_n, acts[i].dv_actions,
16272                                 svport_match, NULL, &color_rule->rule,
16273                                 &attr)) {
16274                         DRV_LOG(ERR, "Failed to create color%u rule.", i);
16275                         goto err_exit;
16276                 }
16277         }
16278         return 0;
16279 err_exit:
16280         /* Roll back all the policy rules created so far. */
16281         do {
16282                 color_rule = tmp_rules[i];
16283                 if (color_rule) {
16284                         if (color_rule->rule)
16285                                 mlx5_flow_os_destroy_flow(color_rule->rule);
16286                         if (color_rule->matcher) {
16287                                 struct mlx5_flow_tbl_data_entry *tbl =
16288                                         container_of(color_rule->matcher->tbl,
16289                                                      typeof(*tbl), tbl);
16290                                 mlx5_list_unregister(tbl->matchers,
16291                                                 &color_rule->matcher->entry);
16292                         }
16293                         TAILQ_REMOVE(&sub_policy->color_rules[i],
16294                                      color_rule, next_port);
16295                         mlx5_free(color_rule);
16296                 }
16297         } while (i--);
16298         return -1;
16299 }
16300
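/**
 * Prepare the per-color action lists and create the policy rules
 * for one domain of a meter policy.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in,out] mtr_policy
 *   Pointer to meter policy table.
 * @param[in] sub_policy
 *   Pointer to sub policy table.
 * @param[in] domain
 *   Meter domain to be operated on.
 *
 * @return
 *   0 on success, -1 otherwise.
 */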
16301 static int
16302 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
16303                         struct mlx5_flow_meter_policy *mtr_policy,
16304                         struct mlx5_flow_meter_sub_policy *sub_policy,
16305                         uint32_t domain)
16306 {
16307         struct mlx5_priv *priv = dev->data->dev_private;
16308         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16309         struct mlx5_flow_dv_tag_resource *tag;
16310         struct mlx5_flow_dv_port_id_action_resource *port_action;
16311         struct mlx5_hrxq *hrxq;
16312         struct mlx5_flow_meter_info *next_fm = NULL;
16313         struct mlx5_flow_meter_policy *next_policy;
16314         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16315         struct mlx5_flow_tbl_data_entry *tbl_data;
16316         struct rte_flow_error error;
16317         uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16318         uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
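        /*
         * On egress, or on transfer from a representor port, the meter
         * action must come first in the action list.
         */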
16319         bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
16320         bool match_src_port = false;
16321         int i;
16322
16323         /* If the fate is RSS or Queue, no previous actions / rules were created. */
16324         for (i = 0; i < RTE_COLORS; i++) {
16325                 acts[i].actions_n = 0;
16326                 if (i == RTE_COLOR_RED) {
16327                         /* Only support drop on red. */
16328                         acts[i].dv_actions[0] =
16329                                 mtr_policy->dr_drop_action[domain];
16330                         acts[i].actions_n = 1;
16331                         continue;
16332                 }
16333                 if (i == RTE_COLOR_GREEN &&
16334                     mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
16335                         struct rte_flow_attr attr = {
16336                                 .transfer = transfer
16337                         };
16338
16339                         next_fm = mlx5_flow_meter_find(priv,
16340                                         mtr_policy->act_cnt[i].next_mtr_id,
16341                                         NULL);
16342                         if (!next_fm) {
16343                                 DRV_LOG(ERR,
16344                                         "Failed to get next hierarchy meter.");
16345                                 goto err_exit;
16346                         }
16347                         if (mlx5_flow_meter_attach(priv, next_fm,
16348                                                    &attr, &error)) {
16349                                 DRV_LOG(ERR, "%s", error.message);
16350                                 next_fm = NULL;
16351                                 goto err_exit;
16352                         }
16353                         /* Meter action must be the first for TX. */
16354                         if (mtr_first) {
16355                                 acts[i].dv_actions[acts[i].actions_n] =
16356                                         next_fm->meter_action;
16357                                 acts[i].actions_n++;
16358                         }
16359                 }
16360                 if (mtr_policy->act_cnt[i].rix_mark) {
16361                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
16362                                         mtr_policy->act_cnt[i].rix_mark);
16363                         if (!tag) {
16364                                 DRV_LOG(ERR, "Failed to find "
16365                                 "mark action for policy.");
16366                                 goto err_exit;
16367                         }
16368                         acts[i].dv_actions[acts[i].actions_n] = tag->action;
16369                         acts[i].actions_n++;
16370                 }
16371                 if (mtr_policy->act_cnt[i].modify_hdr) {
16372                         acts[i].dv_actions[acts[i].actions_n] =
16373                                 mtr_policy->act_cnt[i].modify_hdr->action;
16374                         acts[i].actions_n++;
16375                 }
16376                 if (mtr_policy->act_cnt[i].fate_action) {
16377                         switch (mtr_policy->act_cnt[i].fate_action) {
16378                         case MLX5_FLOW_FATE_PORT_ID:
16379                                 port_action = mlx5_ipool_get
16380                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
16381                                 mtr_policy->act_cnt[i].rix_port_id_action);
16382                                 if (!port_action) {
16383                                         DRV_LOG(ERR, "Failed to find "
16384                                                 "port action for policy.");
16385                                         goto err_exit;
16386                                 }
16387                                 acts[i].dv_actions[acts[i].actions_n] =
16388                                         port_action->action;
16389                                 acts[i].actions_n++;
16390                                 mtr_policy->dev = dev;
16391                                 match_src_port = true;
16392                                 break;
16393                         case MLX5_FLOW_FATE_DROP:
16394                         case MLX5_FLOW_FATE_JUMP:
16395                                 acts[i].dv_actions[acts[i].actions_n] =
16396                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
16397                                 acts[i].actions_n++;
16398                                 break;
16399                         case MLX5_FLOW_FATE_SHARED_RSS:
16400                         case MLX5_FLOW_FATE_QUEUE:
16401                                 hrxq = mlx5_ipool_get
16402                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
16403                                          sub_policy->rix_hrxq[i]);
16404                                 if (!hrxq) {
16405                                         DRV_LOG(ERR, "Failed to find "
16406                                                 "queue action for policy.");
16407                                         goto err_exit;
16408                                 }
16409                                 acts[i].dv_actions[acts[i].actions_n] =
16410                                         hrxq->action;
16411                                 acts[i].actions_n++;
16412                                 break;
16413                         case MLX5_FLOW_FATE_MTR:
16414                                 if (!next_fm) {
16415                                         DRV_LOG(ERR,
16416                                                 "No next hierarchy meter.");
16417                                         goto err_exit;
16418                                 }
16419                                 if (!mtr_first) {
16420                                         acts[i].dv_actions[acts[i].actions_n] =
16421                                                         next_fm->meter_action;
16422                                         acts[i].actions_n++;
16423                                 }
16424                                 if (mtr_policy->act_cnt[i].next_sub_policy) {
16425                                         next_sub_policy =
16426                                         mtr_policy->act_cnt[i].next_sub_policy;
16427                                 } else {
16428                                         next_policy =
16429                                                 mlx5_flow_meter_policy_find(dev,
16430                                                 next_fm->policy_id, NULL);
16431                                         MLX5_ASSERT(next_policy);
16432                                         next_sub_policy =
16433                                         next_policy->sub_policys[domain][0];
16434                                 }
16435                                 tbl_data =
16436                                         container_of(next_sub_policy->tbl_rsc,
16437                                         struct mlx5_flow_tbl_data_entry, tbl);
16438                                 acts[i].dv_actions[acts[i].actions_n++] =
16439                                                         tbl_data->jump.action;
16440                                 if (mtr_policy->act_cnt[i].modify_hdr)
16441                                         match_src_port = !!transfer;
16442                                 break;
16443                         default:
16444                         /* Other fate actions: nothing to do. */
16445                                 break;
16446                         }
16447                 }
16448         }
16449         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
16450                                 egress, transfer, match_src_port, acts)) {
16451                 DRV_LOG(ERR,
16452                         "Failed to create policy rules per domain.");
16453                 goto err_exit;
16454         }
16455         return 0;
16456 err_exit:
16457         if (next_fm)
16458                 mlx5_flow_meter_detach(priv, next_fm);
16459         return -1;
16460 }
16461
16462 /**
16463  * Create the policy rules.
16464  *
16465  * @param[in] dev
16466  *   Pointer to Ethernet device.
16467  * @param[in,out] mtr_policy
16468  *   Pointer to meter policy table.
16469  *
16470  * @return
16471  *   0 on success, -1 otherwise.
16472  */
16473 static int
16474 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16475                              struct mlx5_flow_meter_policy *mtr_policy)
16476 {
16477         int i;
16478         uint16_t sub_policy_num;
16479
16480         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16481                 sub_policy_num = (mtr_policy->sub_policy_num >>
16482                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16483                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16484                 if (!sub_policy_num)
16485                         continue;
16486                 /* Prepare actions list and create policy rules. */
16487                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16488                         mtr_policy->sub_policys[i][0], i)) {
16489                         DRV_LOG(ERR, "Failed to create policy action "
16490                                 "list per domain.");
16491                         return -1;
16492                 }
16493         }
16494         return 0;
16495 }
16496
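/**
 * Create the default policy table and rules for one meter domain.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] domain
 *   Meter domain to be operated on.
 *
 * @return
 *   0 on success, -1 otherwise.
 */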
16497 static int
16498 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
16499 {
16500         struct mlx5_priv *priv = dev->data->dev_private;
16501         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16502         struct mlx5_flow_meter_def_policy *def_policy;
16503         struct mlx5_flow_tbl_resource *jump_tbl;
16504         struct mlx5_flow_tbl_data_entry *tbl_data;
16505         uint8_t egress, transfer;
16506         struct rte_flow_error error;
16507         struct mlx5_meter_policy_acts acts[RTE_COLORS];
16508         int ret;
16509
16510         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16511         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16512         def_policy = mtrmng->def_policy[domain];
16513         if (!def_policy) {
16514                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
16515                         sizeof(struct mlx5_flow_meter_def_policy),
16516                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
16517                 if (!def_policy) {
16518                         DRV_LOG(ERR, "Failed to alloc default policy table.");
16519                         goto def_policy_error;
16520                 }
16521                 mtrmng->def_policy[domain] = def_policy;
16522                 /* Create the meter suffix table with SUFFIX level. */
16523                 jump_tbl = flow_dv_tbl_resource_get(dev,
16524                                 MLX5_FLOW_TABLE_LEVEL_METER,
16525                                 egress, transfer, false, NULL, 0,
16526                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16527                 if (!jump_tbl) {
16528                         DRV_LOG(ERR,
16529                                 "Failed to create meter suffix table.");
16530                         goto def_policy_error;
16531                 }
16532                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
16533                 tbl_data = container_of(jump_tbl,
16534                                         struct mlx5_flow_tbl_data_entry, tbl);
16535                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
16536                                                 tbl_data->jump.action;
16537                 acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
16538                 acts[RTE_COLOR_GREEN].actions_n = 1;
16539                 /*
16540                  * YELLOW uses the same default policy as GREEN: both share
16541                  * the same table and jump action. Getting the table resource
16542                  * a second time only increments its reference count for the
16543                  * release stage.
16544                  */
16545                 jump_tbl = flow_dv_tbl_resource_get(dev,
16546                                 MLX5_FLOW_TABLE_LEVEL_METER,
16547                                 egress, transfer, false, NULL, 0,
16548                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
16549                 if (!jump_tbl) {
16550                         DRV_LOG(ERR,
16551                                 "Failed to get meter suffix table.");
16552                         goto def_policy_error;
16553                 }
16554                 def_policy->sub_policy.jump_tbl[RTE_COLOR_YELLOW] = jump_tbl;
16555                 tbl_data = container_of(jump_tbl,
16556                                         struct mlx5_flow_tbl_data_entry, tbl);
16557                 def_policy->dr_jump_action[RTE_COLOR_YELLOW] =
16558                                                 tbl_data->jump.action;
16559                 acts[RTE_COLOR_YELLOW].dv_actions[0] = tbl_data->jump.action;
16560                 acts[RTE_COLOR_YELLOW].actions_n = 1;
16561                 /* Create jump action to the drop table. */
16562                 if (!mtrmng->drop_tbl[domain]) {
16563                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
16564                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
16565                                  egress, transfer, false, NULL, 0,
16566                                  0, MLX5_MTR_TABLE_ID_DROP, &error);
16567                         if (!mtrmng->drop_tbl[domain]) {
16568                                 DRV_LOG(ERR, "Failed to create meter "
16569                                         "drop table for default policy.");
16570                                 goto def_policy_error;
16571                         }
16572                 }
16573                 /* All RED packets jump to the unique drop table. */
16574                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16575                                         struct mlx5_flow_tbl_data_entry, tbl);
16576                 def_policy->dr_jump_action[RTE_COLOR_RED] =
16577                                                 tbl_data->jump.action;
16578                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
16579                 acts[RTE_COLOR_RED].actions_n = 1;
16580                 /* Create default policy rules. */
16581                 ret = __flow_dv_create_domain_policy_rules(dev,
16582                                         &def_policy->sub_policy,
16583                                         egress, transfer, false, acts);
16584                 if (ret) {
16585                         DRV_LOG(ERR, "Failed to create default policy rules.");
16586                         goto def_policy_error;
16587                 }
16588         }
16589         return 0;
16590 def_policy_error:
16591         __flow_dv_destroy_domain_def_policy(dev,
16592                                             (enum mlx5_meter_domain)domain);
16593         return -1;
16594 }
16595
16596 /**
16597  * Create the default policy table set.
16598  *
16599  * @param[in] dev
16600  *   Pointer to Ethernet device.
16601  * @return
16602  *   0 on success, -1 otherwise.
16603  */
16604 static int
16605 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16606 {
16607         struct mlx5_priv *priv = dev->data->dev_private;
16608         int i;
16609
16610         /* Create the non-termination default policy table per domain. */
16611         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
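                /* The transfer domain requires E-Switch (dv_esw_en) to be enabled. */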
16612                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16613                         continue;
16614                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16615                         DRV_LOG(ERR, "Failed to create default policy");
16616                         /* Rollback the created default policies for others. */
16617                         flow_dv_destroy_def_policy(dev);
16618                         return -1;
16619                 }
16620         }
16621         return 0;
16622 }
16623
16624 /**
16625  * Create the needed meter tables.
16626  * Lock-free (the mutex should be acquired by the caller).
16627  *
16628  * @param[in] dev
16629  *   Pointer to Ethernet device.
16630  * @param[in] fm
16631  *   Meter information table.
16632  * @param[in] mtr_idx
16633  *   Meter index.
16634  * @param[in] domain_bitmap
16635  *   Domain bitmap.
16636  * @return
16637  *   0 on success, -1 otherwise.
16638  */
16639 static int
16640 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16641                         struct mlx5_flow_meter_info *fm,
16642                         uint32_t mtr_idx,
16643                         uint8_t domain_bitmap)
16644 {
16645         struct mlx5_priv *priv = dev->data->dev_private;
16646         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16647         struct rte_flow_error error;
16648         struct mlx5_flow_tbl_data_entry *tbl_data;
16649         uint8_t egress, transfer;
16650         void *actions[METER_ACTIONS];
16651         int domain, ret, i;
16652         struct mlx5_flow_counter *cnt;
16653         struct mlx5_flow_dv_match_params value = {
16654                 .size = sizeof(value.buf),
16655         };
16656         struct mlx5_flow_dv_match_params matcher_para = {
16657                 .size = sizeof(matcher_para.buf),
16658         };
16659         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16660                                                      0, &error);
16661         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
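        /*
         * When the color register is shared, the meter ID is placed in
         * the bits above the color bits of the same register.
         */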
16662         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16663         struct mlx5_list_entry *entry;
16664         struct mlx5_flow_dv_matcher matcher = {
16665                 .mask = {
16666                         .size = sizeof(matcher.mask.buf),
16667                 },
16668         };
16669         struct mlx5_flow_dv_matcher *drop_matcher;
16670         struct mlx5_flow_cb_ctx ctx = {
16671                 .error = &error,
16672                 .data = &matcher,
16673         };
16674         uint8_t misc_mask;
16675
16676         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16677                 rte_errno = ENOTSUP;
16678                 return -1;
16679         }
16680         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16681                 if (!(domain_bitmap & (1 << domain)) ||
16682                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16683                         continue;
16684                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16685                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16686                 /* Create the drop table with METER DROP level. */
16687                 if (!mtrmng->drop_tbl[domain]) {
16688                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16689                                         MLX5_FLOW_TABLE_LEVEL_METER,
16690                                         egress, transfer, false, NULL, 0,
16691                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16692                         if (!mtrmng->drop_tbl[domain]) {
16693                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16694                                 goto policy_error;
16695                         }
16696                 }
16697                 /* Create default matcher in drop table. */
16698                 matcher.tbl = mtrmng->drop_tbl[domain];
16699                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16700                                 struct mlx5_flow_tbl_data_entry, tbl);
16701                 if (!mtrmng->def_matcher[domain]) {
16702                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16703                                        (enum modify_reg)mtr_id_reg_c,
16704                                        0, 0);
16705                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16706                         matcher.crc = rte_raw_cksum
16707                                         ((const void *)matcher.mask.buf,
16708                                         matcher.mask.size);
16709                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16710                         if (!entry) {
16711                                 DRV_LOG(ERR, "Failed to register meter "
16712                                 "drop default matcher.");
16713                                 goto policy_error;
16714                         }
16715                         mtrmng->def_matcher[domain] = container_of(entry,
16716                         struct mlx5_flow_dv_matcher, entry);
16717                 }
16718                 /* Create default rule in drop table. */
16719                 if (!mtrmng->def_rule[domain]) {
16720                         i = 0;
16721                         actions[i++] = priv->sh->dr_drop_action;
16722                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16723                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16724                         misc_mask = flow_dv_matcher_enable(value.buf);
16725                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16726                         ret = mlx5_flow_os_create_flow
16727                                 (mtrmng->def_matcher[domain]->matcher_object,
16728                                 (void *)&value, i, actions,
16729                                 &mtrmng->def_rule[domain]);
16730                         if (ret) {
16731                                 DRV_LOG(ERR, "Failed to create meter "
16732                                 "default drop rule for drop table.");
16733                                 goto policy_error;
16734                         }
16735                 }
16736                 if (!fm->drop_cnt)
16737                         continue;
16738                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16739                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16740                         /* Create matchers for Drop. */
16741                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16742                                         (enum modify_reg)mtr_id_reg_c, 0,
16743                                         (mtr_id_mask << mtr_id_offset));
16744                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16745                         matcher.crc = rte_raw_cksum
16746                                         ((const void *)matcher.mask.buf,
16747                                         matcher.mask.size);
16748                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16749                         if (!entry) {
16750                                 DRV_LOG(ERR,
16751                                 "Failed to register meter drop matcher.");
16752                                 goto policy_error;
16753                         }
16754                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16755                                 container_of(entry, struct mlx5_flow_dv_matcher,
16756                                              entry);
16757                 }
16758                 drop_matcher =
16759                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16760                 /* Create drop rule, matching meter_id only. */
16761                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16762                                 (enum modify_reg)mtr_id_reg_c,
16763                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16764                 i = 0;
16765                 cnt = flow_dv_counter_get_by_idx(dev,
16766                                         fm->drop_cnt, NULL);
16767                 actions[i++] = cnt->action;
16768                 actions[i++] = priv->sh->dr_drop_action;
16769                 misc_mask = flow_dv_matcher_enable(value.buf);
16770                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16771                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16772                                                (void *)&value, i, actions,
16773                                                &fm->drop_rule[domain]);
16774                 if (ret) {
16775                         DRV_LOG(ERR, "Failed to create meter "
16776                                 "drop rule for drop table.");
16777                         goto policy_error;
16778                 }
16779         }
16780         return 0;
16781 policy_error:
16782         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16783                 if (fm->drop_rule[i]) {
16784                         claim_zero(mlx5_flow_os_destroy_flow
16785                                 (fm->drop_rule[i]));
16786                         fm->drop_rule[i] = NULL;
16787                 }
16788         }
16789         return -1;
16790 }
16791
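/**
 * Get an existing or create a new RSS sub policy in the ingress domain.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in,out] mtr_policy
 *   Pointer to meter policy table.
 * @param[in] rss_desc
 *   Pointer to the RSS descriptors, one per color.
 * @param[in] next_sub_policy
 *   Pointer to the next sub policy in the hierarchy, if any.
 * @param[out] is_reuse
 *   Set to true if an existing sub policy is reused.
 *
 * @return
 *   Pointer to the sub policy on success, NULL otherwise.
 */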
16792 static struct mlx5_flow_meter_sub_policy *
16793 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16794                 struct mlx5_flow_meter_policy *mtr_policy,
16795                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16796                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16797                 bool *is_reuse)
16798 {
16799         struct mlx5_priv *priv = dev->data->dev_private;
16800         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16801         uint32_t sub_policy_idx = 0;
16802         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16803         uint32_t i, j;
16804         struct mlx5_hrxq *hrxq;
16805         struct mlx5_flow_handle dh;
16806         struct mlx5_meter_policy_action_container *act_cnt;
16807         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16808         uint16_t sub_policy_num;
16809
16810         rte_spinlock_lock(&mtr_policy->sl);
16811         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16812                 if (!rss_desc[i])
16813                         continue;
16814                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16815                 if (!hrxq_idx[i]) {
16816                         rte_spinlock_unlock(&mtr_policy->sl);
16817                         return NULL;
16818                 }
16819         }
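        /*
         * sub_policy_num packs a per-domain counter, using
         * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per domain masked by
         * MLX5_MTR_SUB_POLICY_NUM_MASK.
         */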
16820         sub_policy_num = (mtr_policy->sub_policy_num >>
16821                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16822                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16823         for (j = 0; j < sub_policy_num; j++) {
16824                 for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16825                         if (rss_desc[i] &&
16826                             hrxq_idx[i] !=
16827                             mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
16828                                 break;
16829                 }
16830                 if (i >= MLX5_MTR_RTE_COLORS) {
16831                         /*
16832                          * Found the sub policy table with
16833                          * the same queue per color.
16834                          */
16835                         rte_spinlock_unlock(&mtr_policy->sl);
16836                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16837                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16838                         *is_reuse = true;
16839                         return mtr_policy->sub_policys[domain][j];
16840                 }
16841         }
16842         /* Create sub policy. */
16843         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16844                 /* Reuse the first pre-allocated sub_policy. */
16845                 sub_policy = mtr_policy->sub_policys[domain][0];
16846                 sub_policy_idx = sub_policy->idx;
16847         } else {
16848                 sub_policy = mlx5_ipool_zmalloc
16849                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16850                                  &sub_policy_idx);
16851                 if (!sub_policy ||
16852                     sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16853                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16854                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16855                         goto rss_sub_policy_error;
16856                 }
16857                 sub_policy->idx = sub_policy_idx;
16858                 sub_policy->main_policy = mtr_policy;
16859         }
16860         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16861                 if (!rss_desc[i])
16862                         continue;
16863                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16864                 if (mtr_policy->is_hierarchy) {
16865                         act_cnt = &mtr_policy->act_cnt[i];
16866                         act_cnt->next_sub_policy = next_sub_policy;
16867                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16868                 } else {
16869                         /*
16870                          * Overwrite the last action, replacing the
16871                          * RSS action with a Queue action.
16872                          */
16873                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16874                                               hrxq_idx[i]);
16875                         if (!hrxq) {
16876                                 DRV_LOG(ERR, "Failed to get policy hrxq");
16877                                 goto rss_sub_policy_error;
16878                         }
16879                         act_cnt = &mtr_policy->act_cnt[i];
16880                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16881                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16882                                 if (act_cnt->rix_mark)
16883                                         dh.mark = 1;
16884                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16885                                 dh.rix_hrxq = hrxq_idx[i];
16886                                 flow_drv_rxq_flags_set(dev, &dh);
16887                         }
16888                 }
16889         }
16890         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16891                                                sub_policy, domain)) {
16892                 DRV_LOG(ERR, "Failed to create policy "
16893                         "rules for ingress domain.");
16894                 goto rss_sub_policy_error;
16895         }
16896         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16897                 i = (mtr_policy->sub_policy_num >>
16898                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16899                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16900                 if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
16901                         DRV_LOG(ERR, "No free sub-policy slot.");
16902                         goto rss_sub_policy_error;
16903                 }
16904                 mtr_policy->sub_policys[domain][i] = sub_policy;
16905                 i++;
16906                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16907                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16908                 mtr_policy->sub_policy_num |=
16909                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16910                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16911         }
16912         rte_spinlock_unlock(&mtr_policy->sl);
16913         *is_reuse = false;
16914         return sub_policy;
16915 rss_sub_policy_error:
16916         if (sub_policy) {
16917                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16918                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16919                         i = (mtr_policy->sub_policy_num >>
16920                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16921                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16922                         mtr_policy->sub_policys[domain][i] = NULL;
16923                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16924                                         sub_policy->idx);
16925                 }
16926         }
16927         rte_spinlock_unlock(&mtr_policy->sl);
16928         return NULL;
16929 }
16930
16931 /**
16932  * Find the policy table for prefix table with RSS.
16933  *
16934  * @param[in] dev
16935  *   Pointer to Ethernet device.
16936  * @param[in] mtr_policy
16937  *   Pointer to meter policy table.
16938  * @param[in] rss_desc
16939  *   Pointer to the RSS descriptors, one per color.
16940  * @return
16941  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
16942  */
16943 static struct mlx5_flow_meter_sub_policy *
16944 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
16945                 struct mlx5_flow_meter_policy *mtr_policy,
16946                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
16947 {
16948         struct mlx5_priv *priv = dev->data->dev_private;
16949         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16950         struct mlx5_flow_meter_info *next_fm;
16951         struct mlx5_flow_meter_policy *next_policy;
16952         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
16953         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
16954         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
16955         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16956         bool reuse_sub_policy;
16957         uint32_t i = 0;
16958         uint32_t j = 0;
16959
16960         while (true) {
16961                 /* Iterate hierarchy to get all policies in this hierarchy. */
16962                 policies[i++] = mtr_policy;
16963                 if (!mtr_policy->is_hierarchy)
16964                         break;
16965                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
16966                         DRV_LOG(ERR, "Exceeded max meter number in hierarchy.");
16967                         return NULL;
16968                 }
16969                 next_fm = mlx5_flow_meter_find(priv,
16970                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16971                 if (!next_fm) {
16972                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
16973                         return NULL;
16974                 }
16975                 next_policy =
16976                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
16977                                                     NULL);
16978                 MLX5_ASSERT(next_policy);
16979                 mtr_policy = next_policy;
16980         }
16981         while (i) {
16982                 /*
16983                  * From the last policy to the first one in the hierarchy,
16984                  * create or get the sub policy for each of them.
16985                  */
16986                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
16987                                                         policies[--i],
16988                                                         rss_desc,
16989                                                         next_sub_policy,
16990                                                         &reuse_sub_policy);
16991                 if (!sub_policy) {
16992                         DRV_LOG(ERR, "Failed to get the sub policy.");
16993                         goto err_exit;
16994                 }
16995                 if (!reuse_sub_policy)
16996                         sub_policies[j++] = sub_policy;
16997                 next_sub_policy = sub_policy;
16998         }
16999         return sub_policy;
17000 err_exit:
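        /* Roll back the sub policies created (not reused) by this call. */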
17001         while (j) {
17002                 uint16_t sub_policy_num;
17003
17004                 sub_policy = sub_policies[--j];
17005                 mtr_policy = sub_policy->main_policy;
17006                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
17007                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
17008                         sub_policy_num = (mtr_policy->sub_policy_num >>
17009                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17010                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
17011                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
17012                                                                         NULL;
17013                         sub_policy_num--;
17014                         mtr_policy->sub_policy_num &=
17015                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17016                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17017                         mtr_policy->sub_policy_num |=
17018                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17019                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17020                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17021                                         sub_policy->idx);
17022                 }
17023         }
17024         return NULL;
17025 }
17026
17027 /**
17028  * Create the sub policy tag rule for all meters in hierarchy.
17029  *
17030  * @param[in] dev
17031  *   Pointer to Ethernet device.
17032  * @param[in] fm
17033  *   Meter information table.
17034  * @param[in] src_port
17035  *   The src port this extra rule should use.
17036  * @param[in] item
17037  *   The src port match item.
17038  * @param[out] error
17039  *   Perform verbose error reporting if not NULL.
17040  * @return
17041  *   0 on success, a negative errno value otherwise and rte_errno is set.
17042  */
17043 static int
17044 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
17045                                 struct mlx5_flow_meter_info *fm,
17046                                 int32_t src_port,
17047                                 const struct rte_flow_item *item,
17048                                 struct rte_flow_error *error)
17049 {
17050         struct mlx5_priv *priv = dev->data->dev_private;
17051         struct mlx5_flow_meter_policy *mtr_policy;
17052         struct mlx5_flow_meter_sub_policy *sub_policy;
17053         struct mlx5_flow_meter_info *next_fm = NULL;
17054         struct mlx5_flow_meter_policy *next_policy;
17055         struct mlx5_flow_meter_sub_policy *next_sub_policy;
17056         struct mlx5_flow_tbl_data_entry *tbl_data;
17057         struct mlx5_sub_policy_color_rule *color_rule;
17058         struct mlx5_meter_policy_acts acts;
17059         uint32_t color_reg_c_idx;
17060         bool mtr_first = src_port != UINT16_MAX;
17061         struct rte_flow_attr attr = {
17062                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
17063                 .priority = 0,
17064                 .ingress = 0,
17065                 .egress = 0,
17066                 .transfer = 1,
17067                 .reserved = 0,
17068         };
17069         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
17070         int i;
17071
17072         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17073         MLX5_ASSERT(mtr_policy);
17074         if (!mtr_policy->is_hierarchy)
17075                 return 0;
17076         next_fm = mlx5_flow_meter_find(priv,
17077                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
17078         if (!next_fm) {
17079                 return rte_flow_error_set(error, EINVAL,
17080                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
17081                                 "Failed to find next meter in hierarchy.");
17082         }
17083         if (!next_fm->drop_cnt)
17084                 goto exit;
17085         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
17086         sub_policy = mtr_policy->sub_policys[domain][0];
17087         for (i = 0; i < RTE_COLORS; i++) {
17088                 bool rule_exist = false;
17089                 struct mlx5_meter_policy_action_container *act_cnt;
17090
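                /* Only the GREEN color rules are needed currently. */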
17091                 if (i >= RTE_COLOR_YELLOW)
17092                         break;
17093                 TAILQ_FOREACH(color_rule,
17094                               &sub_policy->color_rules[i], next_port)
17095                         if (color_rule->src_port == src_port) {
17096                                 rule_exist = true;
17097                                 break;
17098                         }
17099                 if (rule_exist)
17100                         continue;
17101                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
17102                                 sizeof(struct mlx5_sub_policy_color_rule),
17103                                 0, SOCKET_ID_ANY);
17104                 if (!color_rule)
17105                         return rte_flow_error_set(error, ENOMEM,
17106                                 RTE_FLOW_ERROR_TYPE_ACTION,
17107                                 NULL, "No memory to create tag color rule.");
17108                 color_rule->src_port = src_port;
17109                 attr.priority = i;
17110                 next_policy = mlx5_flow_meter_policy_find(dev,
17111                                                 next_fm->policy_id, NULL);
17112                 MLX5_ASSERT(next_policy);
17113                 next_sub_policy = next_policy->sub_policys[domain][0];
17114                 tbl_data = container_of(next_sub_policy->tbl_rsc,
17115                                         struct mlx5_flow_tbl_data_entry, tbl);
17116                 act_cnt = &mtr_policy->act_cnt[i];
17117                 if (mtr_first) {
17118                         acts.dv_actions[0] = next_fm->meter_action;
17119                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
17120                 } else {
17121                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
17122                         acts.dv_actions[1] = next_fm->meter_action;
17123                 }
17124                 acts.dv_actions[2] = tbl_data->jump.action;
17125                 acts.actions_n = 3;
17126                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
17127                         next_fm = NULL;
17128                         goto err_exit;
17129                 }
17130                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
17131                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
17132                                 &attr, true, item,
17133                                 &color_rule->matcher, error)) {
17134                         rte_flow_error_set(error, errno,
17135                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17136                                 "Failed to create hierarchy meter matcher.");
17137                         goto err_exit;
17138                 }
17139                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
17140                                         (enum rte_color)i,
17141                                         color_rule->matcher->matcher_object,
17142                                         acts.actions_n, acts.dv_actions,
17143                                         true, item,
17144                                         &color_rule->rule, &attr)) {
17145                         rte_flow_error_set(error, errno,
17146                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17147                                 "Failed to create hierarchy meter rule.");
17148                         goto err_exit;
17149                 }
17150                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
17151                                   color_rule, next_port);
17152         }
17153 exit:
17154         /*
17155          * Recurse to iterate all meters in the hierarchy and create
17156          * the needed rules.
17157          */
17158         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
17159                                                 src_port, item, error);
17160 err_exit:
17161         if (color_rule) {
17162                 if (color_rule->rule)
17163                         mlx5_flow_os_destroy_flow(color_rule->rule);
17164                 if (color_rule->matcher) {
17165                         struct mlx5_flow_tbl_data_entry *tbl =
17166                                 container_of(color_rule->matcher->tbl,
17167                                                 typeof(*tbl), tbl);
17168                         mlx5_list_unregister(tbl->matchers,
17169                                                 &color_rule->matcher->entry);
17170                 }
17171                 mlx5_free(color_rule);
17172         }
17173         if (next_fm)
17174                 mlx5_flow_meter_detach(priv, next_fm);
17175         return -rte_errno;
17176 }
17177
17178 /**
17179  * Destroy the sub policy table with RX queue.
17180  *
17181  * @param[in] dev
17182  *   Pointer to Ethernet device.
17183  * @param[in] mtr_policy
17184  *   Pointer to meter policy table.
17185  */
17186 static void
17187 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
17188                                     struct mlx5_flow_meter_policy *mtr_policy)
17189 {
17190         struct mlx5_priv *priv = dev->data->dev_private;
17191         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
17192         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
17193         uint32_t i, j;
17194         uint16_t sub_policy_num, new_policy_num;
17195
17196         rte_spinlock_lock(&mtr_policy->sl);
17197         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
17198                 switch (mtr_policy->act_cnt[i].fate_action) {
17199                 case MLX5_FLOW_FATE_SHARED_RSS:
17200                         sub_policy_num = (mtr_policy->sub_policy_num >>
17201                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
17202                         MLX5_MTR_SUB_POLICY_NUM_MASK;
17203                         new_policy_num = sub_policy_num;
17204                         for (j = 0; j < sub_policy_num; j++) {
17205                                 sub_policy =
17206                                         mtr_policy->sub_policys[domain][j];
17207                                 if (sub_policy) {
17208                                         __flow_dv_destroy_sub_policy_rules(dev,
17209                                                 sub_policy);
17210                                         if (sub_policy !=
17211                                             mtr_policy->sub_policys[domain][0]) {
17212                                                 mtr_policy->sub_policys[domain][j] =
17213                                                                         NULL;
17214                                                 mlx5_ipool_free
17215                                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
17216                                                 sub_policy->idx);
17217                                                 new_policy_num--;
17218                                         }
17219                                 }
17220                         }
17221                         if (new_policy_num != sub_policy_num) {
17222                                 mtr_policy->sub_policy_num &=
17223                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
17224                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
17225                                 mtr_policy->sub_policy_num |=
17226                                 (new_policy_num &
17227                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
17228                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
17229                         }
17230                         break;
17231                 case MLX5_FLOW_FATE_QUEUE:
17232                         sub_policy = mtr_policy->sub_policys[domain][0];
17233                         __flow_dv_destroy_sub_policy_rules(dev,
17234                                                            sub_policy);
17235                         break;
17236                 default:
17237                         /* Other actions have no queue; nothing to do. */
17238                         break;
17239                 }
17240         }
17241         rte_spinlock_unlock(&mtr_policy->sl);
17242 }
17243 /**
17244  * Check whether the DR drop action is supported on the root table or not.
17245  *
17246  * Create a simple flow with DR drop action on root table to validate
17247  * if DR drop action on root table is supported or not.
17248  *
17249  * @param[in] dev
17250  *   Pointer to rte_eth_dev structure.
17251  *
17252  * @return
17253  *   0 on success, a negative errno value otherwise and rte_errno is set.
17254  */
17255 int
17256 mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
17257 {
17258         struct mlx5_priv *priv = dev->data->dev_private;
17259         struct mlx5_dev_ctx_shared *sh = priv->sh;
17260         struct mlx5_flow_dv_match_params mask = {
17261                 .size = sizeof(mask.buf),
17262         };
17263         struct mlx5_flow_dv_match_params value = {
17264                 .size = sizeof(value.buf),
17265         };
17266         struct mlx5dv_flow_matcher_attr dv_attr = {
17267                 .type = IBV_FLOW_ATTR_NORMAL,
17268                 .priority = 0,
17269                 .match_criteria_enable = 0,
17270                 .match_mask = (void *)&mask,
17271         };
17272         struct mlx5_flow_tbl_resource *tbl = NULL;
17273         void *matcher = NULL;
17274         void *flow = NULL;
17275         int ret = -1;
17276
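        /* Probe on the root (level 0) table of the NIC Rx domain. */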
17277         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
17278                                         0, 0, 0, NULL);
17279         if (!tbl)
17280                 goto err;
17281         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17282         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17283         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17284                                                tbl->obj, &matcher);
17285         if (ret)
17286                 goto err;
17287         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17288         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17289                                        &sh->dr_drop_action, &flow);
17290 err:
17291         /*
17292          * If the DR drop action is not supported on the root table, flow
17293          * creation will fail with EOPNOTSUPP or EPROTONOSUPPORT.
17294          */
17295         if (!flow) {
17296                 if (matcher &&
17297                     (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
17298                         DRV_LOG(INFO, "DR drop action is not supported in root table.");
17299                 else
17300                         DRV_LOG(ERR, "Unexpected error in DR drop action support detection.");
17301                 ret = -1;
17302         } else {
17303                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17304         }
17305         if (matcher)
17306                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17307         if (tbl)
17308                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17309         return ret;
17310 }
17311
17312 /**
17313  * Validate batch counter support on the root table.
17314  *
17315  * Create a simple flow with an invalid counter offset on the root table
17316  * to check whether a batch counter with offset is supported.
17317  *
17318  * @param[in] dev
17319  *   Pointer to rte_eth_dev structure.
17320  *
17321  * @return
17322  *   0 on success, a negative errno value otherwise and rte_errno is set.
17323  */
17324 int
17325 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
17326 {
17327         struct mlx5_priv *priv = dev->data->dev_private;
17328         struct mlx5_dev_ctx_shared *sh = priv->sh;
17329         struct mlx5_flow_dv_match_params mask = {
17330                 .size = sizeof(mask.buf),
17331         };
17332         struct mlx5_flow_dv_match_params value = {
17333                 .size = sizeof(value.buf),
17334         };
17335         struct mlx5dv_flow_matcher_attr dv_attr = {
17336                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
17337                 .priority = 0,
17338                 .match_criteria_enable = 0,
17339                 .match_mask = (void *)&mask,
17340         };
17341         void *actions[2] = { 0 };
17342         struct mlx5_flow_tbl_resource *tbl = NULL;
17343         struct mlx5_devx_obj *dcs = NULL;
17344         void *matcher = NULL;
17345         void *flow = NULL;
17346         int ret = -1;
17347
17348         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
17349                                         0, 0, 0, NULL);
17350         if (!tbl)
17351                 goto err;
17352         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
17353         if (!dcs)
17354                 goto err;
17355         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
17356                                                     &actions[0]);
17357         if (ret)
17358                 goto err;
17359         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17360         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17361         ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
17362                                                tbl->obj, &matcher);
17363         if (ret)
17364                 goto err;
17365         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17366         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17367                                        actions, &flow);
17368 err:
17369         /*
17370          * If the batch counter with offset is not supported, the driver
17371          * does not validate the invalid offset value and flow creation
17372          * should succeed, meaning batch counters are not supported on
17373          * the root table.
17374          * Otherwise, if flow creation fails, counter offset is supported.
17375          */
17376         if (flow) {
17377                 DRV_LOG(INFO, "Batch counter is not supported in root "
17378                               "table. Switch to fallback mode.");
17379                 rte_errno = ENOTSUP;
17380                 ret = -rte_errno;
17381                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17382         } else {
17383                 /* Check the matcher to ensure the failure was at flow creation. */
17384                 if (!matcher || errno != EINVAL)
17385                         DRV_LOG(ERR, "Unexpected error in counter offset "
17386                                      "support detection");
17387                 ret = 0;
17388         }
17389         if (actions[0])
17390                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17391         if (matcher)
17392                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17393         if (tbl)
17394                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17395         if (dcs)
17396                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17397         return ret;
17398 }
17399
17400 /**
17401  * Query a devx counter.
17402  *
17403  * @param[in] dev
17404  *   Pointer to the Ethernet device structure.
17405  * @param[in] counter
17406  *   Index to the flow counter.
17407  * @param[in] clear
17408  *   Set to clear the counter statistics.
17409  * @param[out] pkts
17410  *   The statistics value of packets.
17411  * @param[out] bytes
17412  *   The statistics value of bytes.
17413  *
17414  * @return
17415  *   0 on success, -1 otherwise.
17416  */
17417 static int
17418 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17419                       uint64_t *pkts, uint64_t *bytes)
17420 {
17421         struct mlx5_priv *priv = dev->data->dev_private;
17422         struct mlx5_flow_counter *cnt;
17423         uint64_t inn_pkts, inn_bytes;
17424         int ret;
17425
17426         if (!priv->sh->devx)
17427                 return -1;
17428
17429         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17430         if (ret)
17431                 return -1;
17432         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
17433         *pkts = inn_pkts - cnt->hits;
17434         *bytes = inn_bytes - cnt->bytes;
17435         if (clear) {
17436                 cnt->hits = inn_pkts;
17437                 cnt->bytes = inn_bytes;
17438         }
17439         return 0;
17440 }
17441
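/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * reaches flow_dv_counter_query() through the generic rte_flow_query() API
 * with a COUNT action. "port_id" and "flow" below are assumed to identify
 * an existing flow created with RTE_FLOW_ACTION_TYPE_COUNT.
 *
 *        struct rte_flow_query_count qc = { .reset = 1 };
 *        struct rte_flow_action count_action = {
 *                .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *        };
 *        struct rte_flow_error qerr;
 *
 *        if (rte_flow_query(port_id, flow, &count_action, &qc, &qerr) == 0)
 *                printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                       qc.hits, qc.bytes);
 */
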
17442 /**
17443  * Get aged-out flows.
17444  *
17445  * @param[in] dev
17446  *   Pointer to the Ethernet device structure.
17447  * @param[in] context
17448  *   The address of an array of pointers to the aged-out flow contexts.
17449  * @param[in] nb_contexts
17450  *   The length of the context array.
17451  * @param[out] error
17452  *   Perform verbose error reporting if not NULL. Initialized in case of
17453  *   error only.
17454  *
17455  * @return
17456  *   The number of contexts retrieved on success, otherwise a negative
17457  *   errno value.
17458  *   If nb_contexts is 0, return the total number of aged contexts.
17459  *   If nb_contexts is not 0, return the number of aged flows reported
17460  *   in the context array.
17461  */
17462 static int
17463 flow_dv_get_aged_flows(struct rte_eth_dev *dev,
17464                     void **context,
17465                     uint32_t nb_contexts,
17466                     struct rte_flow_error *error)
17467 {
17468         struct mlx5_priv *priv = dev->data->dev_private;
17469         struct mlx5_age_info *age_info;
17470         struct mlx5_age_param *age_param;
17471         struct mlx5_flow_counter *counter;
17472         struct mlx5_aso_age_action *act;
17473         int nb_flows = 0;
17474
17475         if (nb_contexts && !context)
17476                 return rte_flow_error_set(error, EINVAL,
17477                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17478                                           NULL, "empty context");
17479         age_info = GET_PORT_AGE_INFO(priv);
17480         rte_spinlock_lock(&age_info->aged_sl);
17481         LIST_FOREACH(act, &age_info->aged_aso, next) {
17482                 nb_flows++;
17483                 if (nb_contexts) {
17484                         context[nb_flows - 1] =
17485                                                 act->age_params.context;
17486                         if (!(--nb_contexts))
17487                                 break;
17488                 }
17489         }
17490         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17491                 nb_flows++;
17492                 if (nb_contexts) {
17493                         age_param = MLX5_CNT_TO_AGE(counter);
17494                         context[nb_flows - 1] = age_param->context;
17495                         if (!(--nb_contexts))
17496                                 break;
17497                 }
17498         }
17499         rte_spinlock_unlock(&age_info->aged_sl);
17500         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17501         return nb_flows;
17502 }
17503
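/*
 * Usage sketch (illustrative only): the common two-pass pattern for the
 * rte_flow_get_aged_flows() API that ends up here. First query the number
 * of aged contexts, then fetch them. "port_id" is an assumed started port.
 *
 *        struct rte_flow_error aerr;
 *        int n = rte_flow_get_aged_flows(port_id, NULL, 0, &aerr);
 *
 *        if (n > 0) {
 *                void **ctx = calloc(n, sizeof(*ctx));
 *
 *                if (ctx != NULL) {
 *                        n = rte_flow_get_aged_flows(port_id, ctx, n, &aerr);
 *                        free(ctx);
 *                }
 *        }
 *
 * Each returned entry is the "context" configured in the AGE action of an
 * aged-out flow.
 */
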
17504 /*
17505  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17506  */
17507 static uint32_t
17508 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17509 {
17510         return flow_dv_counter_alloc(dev, 0);
17511 }
17512
17513 /**
17514  * Validate indirect action.
17515  * Dispatcher for action type specific validation.
17516  *
17517  * @param[in] dev
17518  *   Pointer to the Ethernet device structure.
17519  * @param[in] conf
17520  *   Indirect action configuration.
17521  * @param[in] action
17522  *   The indirect action object to validate.
17523  * @param[out] error
17524  *   Perform verbose error reporting if not NULL. Initialized in case of
17525  *   error only.
17526  *
17527  * @return
17528  *   0 on success, otherwise negative errno value.
17529  */
17530 static int
17531 flow_dv_action_validate(struct rte_eth_dev *dev,
17532                         const struct rte_flow_indir_action_conf *conf,
17533                         const struct rte_flow_action *action,
17534                         struct rte_flow_error *err)
17535 {
17536         struct mlx5_priv *priv = dev->data->dev_private;
17537
17538         RTE_SET_USED(conf);
17539         switch (action->type) {
17540         case RTE_FLOW_ACTION_TYPE_RSS:
17541                 /*
17542                  * priv->obj_ops is set according to driver capabilities.
17543                  * When DevX capabilities are sufficient, it is set to
17544                  * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
17545                  * ibv_obj_ops does not support the ind_table_modify
17546                  * operation, so in that case the indirect RSS action
17547                  * cannot be used.
17548                  */
17549                 if (priv->obj_ops.ind_table_modify == NULL)
17550                         return rte_flow_error_set
17551                                         (err, ENOTSUP,
17552                                          RTE_FLOW_ERROR_TYPE_ACTION,
17553                                          NULL,
17554                                          "Indirect RSS action not supported");
17555                 return mlx5_validate_action_rss(dev, action, err);
17556         case RTE_FLOW_ACTION_TYPE_AGE:
17557                 if (!priv->sh->aso_age_mng)
17558                         return rte_flow_error_set(err, ENOTSUP,
17559                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17560                                                 NULL,
17561                                                 "Indirect age action not supported");
17562                 return flow_dv_validate_action_age(0, action, dev, err);
17563         case RTE_FLOW_ACTION_TYPE_COUNT:
17564                 return flow_dv_validate_action_count(dev, true, 0, err);
17565         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17566                 if (!priv->sh->ct_aso_en)
17567                         return rte_flow_error_set(err, ENOTSUP,
17568                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17569                                         "ASO CT is not supported");
17570                 return mlx5_validate_action_ct(dev, action->conf, err);
17571         default:
17572                 return rte_flow_error_set(err, ENOTSUP,
17573                                           RTE_FLOW_ERROR_TYPE_ACTION,
17574                                           NULL,
17575                                           "action type not supported");
17576         }
17577 }
17578
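/*
 * Usage sketch (illustrative only): flow_dv_action_validate() is reached
 * through the generic indirect-action API. A shared RSS action, for
 * example, is created once and then referenced by many flows. "port_id"
 * and the queue list are assumptions, not driver code.
 *
 *        uint16_t queues[] = { 0, 1, 2, 3 };
 *        struct rte_flow_indir_action_conf iconf = { .ingress = 1 };
 *        struct rte_flow_action_rss rss = {
 *                .types = RTE_ETH_RSS_IP,
 *                .queue_num = RTE_DIM(queues),
 *                .queue = queues,
 *        };
 *        struct rte_flow_action action = {
 *                .type = RTE_FLOW_ACTION_TYPE_RSS,
 *                .conf = &rss,
 *        };
 *        struct rte_flow_error ierr;
 *        struct rte_flow_action_handle *handle =
 *                rte_flow_action_handle_create(port_id, &iconf, &action, &ierr);
 */
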
17579 /**
17580  * Check if the RSS configurations for colors of a meter policy match
17581  * each other, except the queues.
17582  *
17583  * @param[in] r1
17584  *   Pointer to the first RSS flow action.
17585  * @param[in] r2
17586  *   Pointer to the second RSS flow action.
17587  *
17588  * @return
17589  *   0 on match, 1 on conflict.
17590  */
17591 static inline int
17592 flow_dv_mtr_policy_rss_compare(const struct rte_flow_action_rss *r1,
17593                                const struct rte_flow_action_rss *r2)
17594 {
17595         if (r1 == NULL || r2 == NULL)
17596                 return 0;
17597         if (!(r1->level <= 1 && r2->level <= 1) &&
17598             !(r1->level > 1 && r2->level > 1))
17599                 return 1;
17600         if (r1->types != r2->types &&
17601             !((r1->types == 0 || r1->types == RTE_ETH_RSS_IP) &&
17602               (r2->types == 0 || r2->types == RTE_ETH_RSS_IP)))
17603                 return 1;
17604         if (r1->key || r2->key) {
17605                 const void *key1 = r1->key ? r1->key : rss_hash_default_key;
17606                 const void *key2 = r2->key ? r2->key : rss_hash_default_key;
17607
17608                 if (memcmp(key1, key2, MLX5_RSS_HASH_KEY_LEN))
17609                         return 1;
17610         }
17611         return 0;
17612 }
17613
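/*
 * For example (illustrative values): two per-color RSS configurations that
 * differ only in their queue lists compare as a match and may share one
 * sub-policy, while different hash types are a conflict:
 *
 *        r1 = { .types = RTE_ETH_RSS_IP,  .queue = green_queues, ... }
 *        r2 = { .types = RTE_ETH_RSS_IP,  .queue = yellow_queues, ... } -> 0
 *        r1 = { .types = RTE_ETH_RSS_UDP, ... }
 *        r2 = { .types = RTE_ETH_RSS_TCP, ... }                         -> 1
 *
 * Note the special case above: .types of 0 and RTE_ETH_RSS_IP are treated
 * as equivalent.
 */
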
17614 /**
17615  * Validate the meter hierarchy chain for meter policy.
17616  *
17617  * @param[in] dev
17618  *   Pointer to the Ethernet device structure.
17619  * @param[in] meter_id
17620  *   Meter id.
17621  * @param[in] action_flags
17622  *   Holds the actions detected until now.
17623  * @param[out] is_rss
17624  *   Whether the terminal policy in the hierarchy uses RSS.
17625  * @param[out] hierarchy_domain
17626  *   The domain bitmap for hierarchy policy.
17627  * @param[out] error
17628  *   Perform verbose error reporting if not NULL. Initialized in case of
17629  *   error only.
17630  *
17631  * @return
17632  *   0 on success, otherwise negative errno value with error set.
17633  */
17634 static int
17635 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17636                                   uint32_t meter_id,
17637                                   uint64_t action_flags,
17638                                   bool *is_rss,
17639                                   uint8_t *hierarchy_domain,
17640                                   struct rte_mtr_error *error)
17641 {
17642         struct mlx5_priv *priv = dev->data->dev_private;
17643         struct mlx5_flow_meter_info *fm;
17644         struct mlx5_flow_meter_policy *policy;
17645         uint8_t cnt = 1;
17646
17647         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17648                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17649                 return -rte_mtr_error_set(error, EINVAL,
17650                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17651                                         NULL,
17652                                         "Multiple fate actions not supported.");
17653         *hierarchy_domain = 0;
17654         while (true) {
17655                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17656                 if (!fm)
17657                         return -rte_mtr_error_set(error, EINVAL,
17658                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17659                                         "Meter not found in meter hierarchy.");
17660                 if (fm->def_policy)
17661                         return -rte_mtr_error_set(error, EINVAL,
17662                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17663                         "Non-termination meter not supported in hierarchy.");
17664                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17665                 MLX5_ASSERT(policy);
17666                 /*
17667                  * Only inherit the supported domains of the first meter in
17668                  * hierarchy.
17669                  * One meter supports at least one domain.
17670                  */
17671                 if (!*hierarchy_domain) {
17672                         if (policy->transfer)
17673                                 *hierarchy_domain |=
17674                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17675                         if (policy->ingress)
17676                                 *hierarchy_domain |=
17677                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17678                         if (policy->egress)
17679                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17680                 }
17681                 if (!policy->is_hierarchy) {
17682                         *is_rss = policy->is_rss;
17683                         break;
17684                 }
17685                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17686                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17687                         return -rte_mtr_error_set(error, EINVAL,
17688                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17689                                         "Exceeded max number of meters in hierarchy.");
17690         }
17691         return 0;
17692 }
17693
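/*
 * Illustrative example of the policy shape validated above: a hierarchy is
 * built by letting the GREEN action of one meter policy point to the next
 * meter. "NEXT_MTR_ID" is an assumed application-chosen meter ID.
 *
 *        struct rte_flow_action_meter next_mtr = { .mtr_id = NEXT_MTR_ID };
 *        const struct rte_flow_action green_acts[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &next_mtr },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 *
 * The chain is walked meter by meter until a terminal (non-hierarchy)
 * policy is found or MLX5_MTR_CHAIN_MAX_NUM is exceeded.
 */
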
17694 /**
17695  * Validate meter policy actions.
17696  * Dispatcher for action type specific validation.
17697  *
17698  * @param[in] dev
17699  *   Pointer to the Ethernet device structure.
17700  * @param[in] actions
17701  *   Array of meter policy actions to validate, one list per color.
17702  * @param[in] attr
17703  *   Attributes of flow to determine steering domain.
17704  * @param[out] error
17705  *   Perform verbose error reporting if not NULL. Initialized in case of
17706  *   error only.
17707  *
17708  * @return
17709  *   0 on success, otherwise negative errno value.
17710  */
17711 static int
17712 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17713                         const struct rte_flow_action *actions[RTE_COLORS],
17714                         struct rte_flow_attr *attr,
17715                         bool *is_rss,
17716                         uint8_t *domain_bitmap,
17717                         uint8_t *policy_mode,
17718                         struct rte_mtr_error *error)
17719 {
17720         struct mlx5_priv *priv = dev->data->dev_private;
17721         struct mlx5_dev_config *dev_conf = &priv->config;
17722         const struct rte_flow_action *act;
17723         uint64_t action_flags[RTE_COLORS] = {0};
17724         int actions_n;
17725         int i, ret;
17726         struct rte_flow_error flow_err;
17727         uint8_t domain_color[RTE_COLORS] = {0};
17728         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17729         uint8_t hierarchy_domain = 0;
17730         const struct rte_flow_action_meter *mtr;
17731         bool def_green = false;
17732         bool def_yellow = false;
17733         const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
17734
17735         if (!priv->config.dv_esw_en)
17736                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17737         *domain_bitmap = def_domain;
17738         /* Red color only supports the DROP action. */
17739         if (!actions[RTE_COLOR_RED] ||
17740             actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17741                 return -rte_mtr_error_set(error, ENOTSUP,
17742                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17743                                 NULL, "Red color only supports drop action.");
17744         /*
17745          * Check default policy actions:
17746          * Green / Yellow: no action, Red: drop action.
17747          * An empty G or Y list triggers creation of its default actions.
17748          */
17749         if (!actions[RTE_COLOR_GREEN] ||
17750             actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)
17751                 def_green = true;
17752         if (!actions[RTE_COLOR_YELLOW] ||
17753             actions[RTE_COLOR_YELLOW]->type == RTE_FLOW_ACTION_TYPE_END)
17754                 def_yellow = true;
17755         if (def_green && def_yellow) {
17756                 *policy_mode = MLX5_MTR_POLICY_MODE_DEF;
17757                 return 0;
17758         } else if (!def_green && def_yellow) {
17759                 *policy_mode = MLX5_MTR_POLICY_MODE_OG;
17760         } else if (def_green && !def_yellow) {
17761                 *policy_mode = MLX5_MTR_POLICY_MODE_OY;
17762         } else {
17763                 *policy_mode = MLX5_MTR_POLICY_MODE_ALL;
17764         }
17765         /* Set to an empty string to avoid a NULL pointer dereference. */
17766         flow_err.message = "";
17767         for (i = 0; i < RTE_COLORS; i++) {
17768                 act = actions[i];
17769                 for (action_flags[i] = 0, actions_n = 0;
17770                      act && act->type != RTE_FLOW_ACTION_TYPE_END;
17771                      act++) {
17772                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17773                                 return -rte_mtr_error_set(error, ENOTSUP,
17774                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17775                                           NULL, "too many actions");
17776                         switch (act->type) {
17777                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17778                         case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
17779                                 if (!priv->config.dv_esw_en)
17780                                         return -rte_mtr_error_set(error,
17781                                         ENOTSUP,
17782                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17783                                         NULL, "PORT action validation"
17784                                         " fails when E-Switch is disabled");
17785                                 ret = flow_dv_validate_action_port_id(dev,
17786                                                 action_flags[i],
17787                                                 act, attr, &flow_err);
17788                                 if (ret)
17789                                         return -rte_mtr_error_set(error,
17790                                         ENOTSUP,
17791                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17792                                         NULL, flow_err.message ?
17793                                         flow_err.message :
17794                                         "PORT action validation failed");
17795                                 ++actions_n;
17796                                 action_flags[i] |= MLX5_FLOW_ACTION_PORT_ID;
17797                                 break;
17798                         case RTE_FLOW_ACTION_TYPE_MARK:
17799                                 ret = flow_dv_validate_action_mark(dev, act,
17800                                                            action_flags[i],
17801                                                            attr, &flow_err);
17802                                 if (ret < 0)
17803                                         return -rte_mtr_error_set(error,
17804                                         ENOTSUP,
17805                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17806                                         NULL, flow_err.message ?
17807                                         flow_err.message :
17808                                         "MARK action validation failed");
17809                                 if (dev_conf->dv_xmeta_en !=
17810                                         MLX5_XMETA_MODE_LEGACY)
17811                                         return -rte_mtr_error_set(error,
17812                                         ENOTSUP,
17813                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17814                                         NULL, "Extended MARK action is "
17815                                         "not supported. Please try using "
17816                                         "the default policy for meter.");
17817                                 action_flags[i] |= MLX5_FLOW_ACTION_MARK;
17818                                 ++actions_n;
17819                                 break;
17820                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17821                                 ret = flow_dv_validate_action_set_tag(dev,
17822                                                         act, action_flags[i],
17823                                                         attr, &flow_err);
17824                                 if (ret)
17825                                         return -rte_mtr_error_set(error,
17826                                         ENOTSUP,
17827                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17828                                         NULL, flow_err.message ?
17829                                         flow_err.message :
17830                                         "Set tag action validation failed");
17831                                 action_flags[i] |= MLX5_FLOW_ACTION_SET_TAG;
17832                                 ++actions_n;
17833                                 break;
17834                         case RTE_FLOW_ACTION_TYPE_DROP:
17835                                 ret = mlx5_flow_validate_action_drop
17836                                         (action_flags[i], attr, &flow_err);
17837                                 if (ret < 0)
17838                                         return -rte_mtr_error_set(error,
17839                                         ENOTSUP,
17840                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17841                                         NULL, flow_err.message ?
17842                                         flow_err.message :
17843                                         "Drop action validation failed");
17844                                 action_flags[i] |= MLX5_FLOW_ACTION_DROP;
17845                                 ++actions_n;
17846                                 break;
17847                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17848                                 /*
17849                                  * Check whether extensive
17850                                  * metadata feature is engaged.
17851                                  */
17852                                 if (dev_conf->dv_flow_en &&
17853                                     (dev_conf->dv_xmeta_en !=
17854                                      MLX5_XMETA_MODE_LEGACY) &&
17855                                     mlx5_flow_ext_mreg_supported(dev))
17856                                         return -rte_mtr_error_set(error,
17857                                           ENOTSUP,
17858                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17859                                           NULL, "Queue action with meta "
17860                                           "is not supported. Please try "
17861                                           "using the default policy for meter.");
17862                                 ret = mlx5_flow_validate_action_queue(act,
17863                                                         action_flags[i], dev,
17864                                                         attr, &flow_err);
17865                                 if (ret < 0)
17866                                         return -rte_mtr_error_set(error,
17867                                           ENOTSUP,
17868                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17869                                           NULL, flow_err.message ?
17870                                           flow_err.message :
17871                                           "Queue action validation failed");
17872                                 action_flags[i] |= MLX5_FLOW_ACTION_QUEUE;
17873                                 ++actions_n;
17874                                 break;
17875                         case RTE_FLOW_ACTION_TYPE_RSS:
17876                                 if (dev_conf->dv_flow_en &&
17877                                     (dev_conf->dv_xmeta_en !=
17878                                      MLX5_XMETA_MODE_LEGACY) &&
17879                                     mlx5_flow_ext_mreg_supported(dev))
17880                                         return -rte_mtr_error_set(error,
17881                                           ENOTSUP,
17882                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17883                                           NULL, "RSS action with meta "
17884                                           "is not supported. Please try "
17885                                           "using the default policy for meter.");
17886                                 ret = mlx5_validate_action_rss(dev, act,
17887                                                                &flow_err);
17888                                 if (ret < 0)
17889                                         return -rte_mtr_error_set(error,
17890                                           ENOTSUP,
17891                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17892                                           NULL, flow_err.message ?
17893                                           flow_err.message :
17894                                           "RSS action validation failed");
17895                                 action_flags[i] |= MLX5_FLOW_ACTION_RSS;
17896                                 ++actions_n;
17897                                 /* Either G or Y will set the RSS. */
17898                                 rss_color[i] = act->conf;
17899                                 break;
17900                         case RTE_FLOW_ACTION_TYPE_JUMP:
17901                                 ret = flow_dv_validate_action_jump(dev,
17902                                         NULL, act, action_flags[i],
17903                                         attr, true, &flow_err);
17904                                 if (ret)
17905                                         return -rte_mtr_error_set(error,
17906                                           ENOTSUP,
17907                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17908                                           NULL, flow_err.message ?
17909                                           flow_err.message :
17910                                           "Jump action validation failed");
17911                                 ++actions_n;
17912                                 action_flags[i] |= MLX5_FLOW_ACTION_JUMP;
17913                                 break;
17914                         /*
17915                          * Only the last meter in the hierarchy supports
17916                          * YELLOW color steering; hence the meter policy
17917                          * actions list must not contain another meter.
17918                          */
17919                         case RTE_FLOW_ACTION_TYPE_METER:
17920                                 if (i != RTE_COLOR_GREEN)
17921                                         return -rte_mtr_error_set(error,
17922                                                 ENOTSUP,
17923                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17924                                                 NULL,
17925                                                 "Meter hierarchy only supports GREEN color.");
17926                                 if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
17927                                         return -rte_mtr_error_set(error,
17928                                                 ENOTSUP,
17929                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17930                                                 NULL,
17931                                                 "No yellow policy should be provided in meter hierarchy.");
17932                                 mtr = act->conf;
17933                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
17934                                                         mtr->mtr_id,
17935                                                         action_flags[i],
17936                                                         is_rss,
17937                                                         &hierarchy_domain,
17938                                                         error);
17939                                 if (ret)
17940                                         return ret;
17941                                 ++actions_n;
17942                                 action_flags[i] |=
17943                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
17944                                 break;
17945                         default:
17946                                 return -rte_mtr_error_set(error, ENOTSUP,
17947                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17948                                         NULL,
17949                                         "Action not supported in meter policy");
17950                         }
17951                 }
17952                 if (action_flags[i] & MLX5_FLOW_ACTION_PORT_ID) {
17953                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
17954                 } else if ((action_flags[i] &
17955                           (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
17956                           (action_flags[i] & MLX5_FLOW_ACTION_MARK)) {
17957                         /*
17958                          * Only MLX5_XMETA_MODE_LEGACY is supported,
17959                          * so the MARK action is ingress-domain only.
17960                          */
17961                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
17962                 } else {
17963                         domain_color[i] = def_domain;
17964                         if (action_flags[i] &&
17965                             !(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17966                                 domain_color[i] &=
17967                                 ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17968                 }
17969                 if (action_flags[i] &
17970                     MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
17971                         domain_color[i] &= hierarchy_domain;
17972                 /*
17973                  * Non-termination actions only support the NIC Tx domain.
17974                  * The adjustment should be skipped when there is no
17975                  * action or only END is provided. The default domain
17976                  * bit-mask is set to find the MIN intersection.
17977                  * The action flags check should also be skipped.
17978                  */
17979                 if ((def_green && i == RTE_COLOR_GREEN) ||
17980                     (def_yellow && i == RTE_COLOR_YELLOW))
17981                         continue;
17982                 /*
17983                  * Validate the drop action mutual exclusion
17984                  * with other actions. Drop action is mutually-exclusive
17985                  * with any other action, except for Count action.
17986                  */
17987                 if ((action_flags[i] & MLX5_FLOW_ACTION_DROP) &&
17988                     (action_flags[i] & ~MLX5_FLOW_ACTION_DROP)) {
17989                         return -rte_mtr_error_set(error, ENOTSUP,
17990                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17991                                 NULL, "Drop action is mutually-exclusive "
17992                                 "with any other action");
17993                 }
17994                 /* E-Switch has a few restrictions on items and actions. */
17995                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
17996                         if (!mlx5_flow_ext_mreg_supported(dev) &&
17997                             action_flags[i] & MLX5_FLOW_ACTION_MARK)
17998                                 return -rte_mtr_error_set(error, ENOTSUP,
17999                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18000                                         NULL, "unsupported action MARK");
18001                         if (action_flags[i] & MLX5_FLOW_ACTION_QUEUE)
18002                                 return -rte_mtr_error_set(error, ENOTSUP,
18003                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18004                                         NULL, "unsupported action QUEUE");
18005                         if (action_flags[i] & MLX5_FLOW_ACTION_RSS)
18006                                 return -rte_mtr_error_set(error, ENOTSUP,
18007                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18008                                         NULL, "unsupported action RSS");
18009                         if (!(action_flags[i] & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
18010                                 return -rte_mtr_error_set(error, ENOTSUP,
18011                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
18012                                         NULL, "no fate action is found");
18013                 } else {
18014                         if (!(action_flags[i] & MLX5_FLOW_FATE_ACTIONS) &&
18015                             (domain_color[i] & MLX5_MTR_DOMAIN_INGRESS_BIT)) {
18016                                 if ((domain_color[i] &
18017                                      MLX5_MTR_DOMAIN_EGRESS_BIT))
18018                                         domain_color[i] =
18019                                                 MLX5_MTR_DOMAIN_EGRESS_BIT;
18020                                 else
18021                                         return -rte_mtr_error_set(error,
18022                                                 ENOTSUP,
18023                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
18024                                                 NULL,
18025                                                 "no fate action is found");
18026                         }
18027                 }
18028         }
18029         /* If both colors have RSS, the attributes should be the same. */
18030         if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN],
18031                                            rss_color[RTE_COLOR_YELLOW]))
18032                 return -rte_mtr_error_set(error, EINVAL,
18033                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18034                                           NULL, "policy RSS attr conflict");
18035         if (rss_color[RTE_COLOR_GREEN] || rss_color[RTE_COLOR_YELLOW])
18036                 *is_rss = true;
18037         /* "domain_color[C]" is non-zero for each color, default is ALL. */
18038         if (!def_green && !def_yellow &&
18039             domain_color[RTE_COLOR_GREEN] != domain_color[RTE_COLOR_YELLOW] &&
18040             !(action_flags[RTE_COLOR_GREEN] & MLX5_FLOW_ACTION_DROP) &&
18041             !(action_flags[RTE_COLOR_YELLOW] & MLX5_FLOW_ACTION_DROP))
18042                 return -rte_mtr_error_set(error, EINVAL,
18043                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
18044                                           NULL, "policy domains conflict");
18045         /*
18046          * At least one color policy is listed in the actions, so the
18047          * supported domains should be the intersection of both colors.
18048          */
18049         *domain_bitmap = domain_color[RTE_COLOR_GREEN] &
18050                          domain_color[RTE_COLOR_YELLOW];
18051         return 0;
18052 }
18053
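/*
 * Usage sketch (illustrative only): the per-color action lists validated
 * above come from the rte_mtr API. A minimal policy with a queue for
 * green, nothing for yellow and the mandatory drop for red could look as
 * below; "port_id" and "POLICY_ID" are assumptions.
 *
 *        struct rte_flow_action_queue q = { .index = 0 };
 *        const struct rte_flow_action green[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 *        const struct rte_flow_action red[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 *        struct rte_mtr_meter_policy_params params = {
 *                .actions = {
 *                        [RTE_COLOR_GREEN] = green,
 *                        [RTE_COLOR_YELLOW] = NULL,
 *                        [RTE_COLOR_RED] = red,
 *                },
 *        };
 *        struct rte_mtr_error merr;
 *
 *        rte_mtr_meter_policy_add(port_id, POLICY_ID, &params, &merr);
 */
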
18054 static int
18055 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
18056 {
18057         struct mlx5_priv *priv = dev->data->dev_private;
18058         int ret = 0;
18059
18060         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
18061                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
18062                                                 flags);
18063                 if (ret != 0)
18064                         return ret;
18065         }
18066         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
18067                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
18068                 if (ret != 0)
18069                         return ret;
18070         }
18071         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
18072                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
18073                 if (ret != 0)
18074                         return ret;
18075         }
18076         return 0;
18077 }
18078
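/*
 * Usage sketch (illustrative only): flow_dv_sync_domain() backs the
 * PMD-specific rte_pmd_mlx5_sync_flow() call, e.g. to make sure rules of
 * all domains are committed to the hardware:
 *
 *        uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX |
 *                           MLX5_DOMAIN_BIT_NIC_TX |
 *                           MLX5_DOMAIN_BIT_FDB;
 *        int rc = rte_pmd_mlx5_sync_flow(port_id, domains);
 *
 * A non-zero "rc" means the sync of one of the domains failed.
 */
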
18079 /**
18080  * Discover the number of available flow priorities
18081  * by trying to create a flow with the highest priority value
18082  * for each possible number.
18083  *
18084  * @param[in] dev
18085  *   Ethernet device.
18086  * @param[in] vprio
18087  *   List of candidate numbers of available priorities.
18088  * @param[in] vprio_n
18089  *   Size of @p vprio array.
18090  * @return
18091  *   On success, number of available flow priorities.
18092  *   On failure, a negative errno-style code and rte_errno is set.
18093  */
18094 static int
18095 flow_dv_discover_priorities(struct rte_eth_dev *dev,
18096                             const uint16_t *vprio, int vprio_n)
18097 {
18098         struct mlx5_priv *priv = dev->data->dev_private;
18099         struct mlx5_indexed_pool *pool = priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW];
18100         struct rte_flow_item_eth eth;
18101         struct rte_flow_item item = {
18102                 .type = RTE_FLOW_ITEM_TYPE_ETH,
18103                 .spec = &eth,
18104                 .mask = &eth,
18105         };
18106         struct mlx5_flow_dv_matcher matcher = {
18107                 .mask = {
18108                         .size = sizeof(matcher.mask.buf),
18109                 },
18110         };
18111         union mlx5_flow_tbl_key tbl_key;
18112         struct mlx5_flow flow;
18113         void *action;
18114         struct rte_flow_error error;
18115         uint8_t misc_mask;
18116         int i, err, ret = -ENOTSUP;
18117
18118         /*
18119          * Prepare a flow with a catch-all pattern and a drop action.
18120          * Use drop queue, because shared drop action may be unavailable.
18121          */
18122         action = priv->drop_queue.hrxq->action;
18123         if (action == NULL) {
18124                 DRV_LOG(ERR, "Priority discovery requires a drop action");
18125                 rte_errno = ENOTSUP;
18126                 return -rte_errno;
18127         }
18128         memset(&flow, 0, sizeof(flow));
18129         flow.handle = mlx5_ipool_zmalloc(pool, &flow.handle_idx);
18130         if (flow.handle == NULL) {
18131                 DRV_LOG(ERR, "Cannot create flow handle");
18132                 rte_errno = ENOMEM;
18133                 return -rte_errno;
18134         }
18135         flow.ingress = true;
18136         flow.dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
18137         flow.dv.actions[0] = action;
18138         flow.dv.actions_n = 1;
18139         memset(&eth, 0, sizeof(eth));
18140         flow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,
18141                                    &item, /* inner */ false, /* group */ 0);
18142         matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
18143         for (i = 0; i < vprio_n; i++) {
18144                 /* Configure the next proposed maximum priority. */
18145                 matcher.priority = vprio[i] - 1;
18146                 memset(&tbl_key, 0, sizeof(tbl_key));
18147                 err = flow_dv_matcher_register(dev, &matcher, &tbl_key, &flow,
18148                                                /* tunnel */ NULL,
18149                                                /* group */ 0,
18150                                                &error);
18151                 if (err != 0) {
18152                         /* This operation is pure SW and must always succeed. */
18153                         DRV_LOG(ERR, "Cannot register matcher");
18154                         ret = -rte_errno;
18155                         break;
18156                 }
18157                 /* Try to apply the flow to HW. */
18158                 misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
18159                 __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
18160                 err = mlx5_flow_os_create_flow
18161                                 (flow.handle->dvh.matcher->matcher_object,
18162                                  (void *)&flow.dv.value, flow.dv.actions_n,
18163                                  flow.dv.actions, &flow.handle->drv_flow);
18164                 if (err == 0) {
18165                         claim_zero(mlx5_flow_os_destroy_flow
18166                                                 (flow.handle->drv_flow));
18167                         flow.handle->drv_flow = NULL;
18168                 }
18169                 claim_zero(flow_dv_matcher_release(dev, flow.handle));
18170                 if (err != 0)
18171                         break;
18172                 ret = vprio[i];
18173         }
18174         mlx5_ipool_free(pool, flow.handle_idx);
18175         /* Set rte_errno if no expected priority value matched. */
18176         if (ret < 0)
18177                 rte_errno = -ret;
18178         return ret;
18179 }
18180
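/*
 * Illustrative call (the candidate list below is an assumption; the common
 * layer owns the actual values):
 *
 *        static const uint16_t vprio[] = { 8, 16 };
 *        int n = flow_dv_discover_priorities(dev, vprio, RTE_DIM(vprio));
 *
 * A return of 16 means 16 flow priorities are usable, 8 means only the
 * smaller set is, and a negative value reports the probing error.
 */
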
18181 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
18182         .validate = flow_dv_validate,
18183         .prepare = flow_dv_prepare,
18184         .translate = flow_dv_translate,
18185         .apply = flow_dv_apply,
18186         .remove = flow_dv_remove,
18187         .destroy = flow_dv_destroy,
18188         .query = flow_dv_query,
18189         .create_mtr_tbls = flow_dv_create_mtr_tbls,
18190         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
18191         .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
18192         .create_meter = flow_dv_mtr_alloc,
18193         .free_meter = flow_dv_aso_mtr_release_to_pool,
18194         .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
18195         .create_mtr_acts = flow_dv_create_mtr_policy_acts,
18196         .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
18197         .create_policy_rules = flow_dv_create_policy_rules,
18198         .destroy_policy_rules = flow_dv_destroy_policy_rules,
18199         .create_def_policy = flow_dv_create_def_policy,
18200         .destroy_def_policy = flow_dv_destroy_def_policy,
18201         .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
18202         .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
18203         .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
18204         .counter_alloc = flow_dv_counter_allocate,
18205         .counter_free = flow_dv_counter_free,
18206         .counter_query = flow_dv_counter_query,
18207         .get_aged_flows = flow_dv_get_aged_flows,
18208         .action_validate = flow_dv_action_validate,
18209         .action_create = flow_dv_action_create,
18210         .action_destroy = flow_dv_action_destroy,
18211         .action_update = flow_dv_action_update,
18212         .action_query = flow_dv_action_query,
18213         .sync_domain = flow_dv_sync_domain,
18214         .discover_priorities = flow_dv_discover_priorities,
18215         .item_create = flow_dv_item_create,
18216         .item_release = flow_dv_item_release,
18217 };
18218
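/*
 * Dispatch sketch (illustrative, simplified): the common flow layer picks
 * this ops table by driver type and calls through it, roughly:
 *
 *        const struct mlx5_flow_driver_ops *fops =
 *                flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
 *
 *        ret = fops->validate(dev, attr, items, actions, external,
 *                             hairpin, error);
 */
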
18219 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
18220